1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI sockets. */
26 #include <linux/compat.h>
27 #include <linux/export.h>
28 #include <linux/utsname.h>
29 #include <linux/sched.h>
30 #include <asm/unaligned.h>
31
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
34 #include <net/bluetooth/hci_mon.h>
35 #include <net/bluetooth/mgmt.h>
36
37 #include "mgmt_util.h"
38
39 static LIST_HEAD(mgmt_chan_list);
40 static DEFINE_MUTEX(mgmt_chan_list_lock);
41
42 static DEFINE_IDA(sock_cookie_ida);
43
44 static atomic_t monitor_promisc = ATOMIC_INIT(0);
45
46 /* ----- HCI socket interface ----- */
47
48 /* Socket info */
49 #define hci_pi(sk) ((struct hci_pinfo *) sk)
50
51 struct hci_pinfo {
52 struct bt_sock bt;
53 struct hci_dev *hdev;
54 struct hci_filter filter;
55 __u8 cmsg_mask;
56 unsigned short channel;
57 unsigned long flags;
58 __u32 cookie;
59 char comm[TASK_COMM_LEN];
60 };
61
62 static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
63 {
64 struct hci_dev *hdev = hci_pi(sk)->hdev;
65
66 if (!hdev)
67 return ERR_PTR(-EBADFD);
68 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
69 return ERR_PTR(-EPIPE);
70 return hdev;
71 }
72
73 void hci_sock_set_flag(struct sock *sk, int nr)
74 {
75 set_bit(nr, &hci_pi(sk)->flags);
76 }
77
78 void hci_sock_clear_flag(struct sock *sk, int nr)
79 {
80 clear_bit(nr, &hci_pi(sk)->flags);
81 }
82
83 int hci_sock_test_flag(struct sock *sk, int nr)
84 {
85 return test_bit(nr, &hci_pi(sk)->flags);
86 }
87
88 unsigned short hci_sock_get_channel(struct sock *sk)
89 {
90 return hci_pi(sk)->channel;
91 }
92
93 u32 hci_sock_get_cookie(struct sock *sk)
94 {
95 return hci_pi(sk)->cookie;
96 }
97
98 static bool hci_sock_gen_cookie(struct sock *sk)
99 {
100 int id = hci_pi(sk)->cookie;
101
102 if (!id) {
103 id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
104 if (id < 0)
105 id = 0xffffffff;
106
107 hci_pi(sk)->cookie = id;
108 get_task_comm(hci_pi(sk)->comm, current);
109 return true;
110 }
111
112 return false;
113 }
114
115 static void hci_sock_free_cookie(struct sock *sk)
116 {
117 int id = hci_pi(sk)->cookie;
118
119 if (id) {
120 hci_pi(sk)->cookie = 0xffffffff;
121 ida_simple_remove(&sock_cookie_ida, id);
122 }
123 }
124
125 static inline int hci_test_bit(int nr, const void *addr)
126 {
127 return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
128 }
129
130 /* Security filter */
131 #define HCI_SFLT_MAX_OGF 5
132
133 struct hci_sec_filter {
134 __u32 type_mask;
135 __u32 event_mask[2];
136 __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
137 };
138
139 static const struct hci_sec_filter hci_sec_filter = {
140 /* Packet types */
141 0x10,
142 /* Events */
143 { 0x1000d9fe, 0x0000b00c },
144 /* Commands */
145 {
146 { 0x0 },
147 /* OGF_LINK_CTL */
148 { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
149 /* OGF_LINK_POLICY */
150 { 0x00005200, 0x00000000, 0x00000000, 0x00 },
151 /* OGF_HOST_CTL */
152 { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
153 /* OGF_INFO_PARAM */
154 { 0x000002be, 0x00000000, 0x00000000, 0x00 },
155 /* OGF_STATUS_PARAM */
156 { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
157 }
158 };
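
/* Example (illustrative only, mirroring the check done in hci_sock_sendmsg()
 * further below): for a sender on the RAW channel without CAP_NET_RAW, a
 * command opcode is validated against this table by splitting it into OGF
 * and OCF and testing the corresponding bit:
 *
 *	u16 opcode = get_unaligned_le16(skb->data);
 *	u16 ogf = hci_opcode_ogf(opcode);
 *	u16 ocf = hci_opcode_ocf(opcode);
 *
 *	if (ogf > HCI_SFLT_MAX_OGF ||
 *	    !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf]))
 *		the command is rejected with -EPERM
 */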
159
160 static struct bt_sock_list hci_sk_list = {
161 .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
162 };
163
164 static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
165 {
166 struct hci_filter *flt;
167 int flt_type, flt_event;
168
169 /* Apply filter */
170 flt = &hci_pi(sk)->filter;
171
172 flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;
173
174 if (!test_bit(flt_type, &flt->type_mask))
175 return true;
176
177 /* Extra filter for event packets only */
178 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
179 return false;
180
181 flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
182
183 if (!hci_test_bit(flt_event, &flt->event_mask))
184 return true;
185
186 /* Check filter only when opcode is set */
187 if (!flt->opcode)
188 return false;
189
190 if (flt_event == HCI_EV_CMD_COMPLETE &&
191 flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
192 return true;
193
194 if (flt_event == HCI_EV_CMD_STATUS &&
195 flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
196 return true;
197
198 return false;
199 }
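
/* Userspace sketch (an assumption, not part of this file): a RAW channel
 * client usually narrows what it will receive with the HCI_FILTER socket
 * option before reading, for example to HCI events only, and of those only
 * Command Complete. Field names follow struct hci_ufilter as handled in
 * hci_sock_setsockopt() below:
 *
 *	struct hci_ufilter flt = { 0 };
 *
 *	flt.type_mask     = 1 << HCI_EVENT_PKT;
 *	flt.event_mask[0] = 1 << HCI_EV_CMD_COMPLETE;
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * For callers without CAP_NET_RAW the requested masks are additionally
 * intersected with hci_sec_filter above.
 */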
200
201 /* Send frame to RAW socket */
202 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
203 {
204 struct sock *sk;
205 struct sk_buff *skb_copy = NULL;
206
207 BT_DBG("hdev %p len %d", hdev, skb->len);
208
209 read_lock(&hci_sk_list.lock);
210
211 sk_for_each(sk, &hci_sk_list.head) {
212 struct sk_buff *nskb;
213
214 if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
215 continue;
216
217 /* Don't send frame to the socket it came from */
218 if (skb->sk == sk)
219 continue;
220
221 if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
222 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
223 hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
224 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
225 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
226 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
227 continue;
228 if (is_filtered_packet(sk, skb))
229 continue;
230 } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
231 if (!bt_cb(skb)->incoming)
232 continue;
233 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
234 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
235 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
236 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
237 continue;
238 } else {
239 /* Don't send frame to other channel types */
240 continue;
241 }
242
243 if (!skb_copy) {
244 /* Create a private copy with headroom */
245 skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
246 if (!skb_copy)
247 continue;
248
249 /* Put type byte before the data */
250 memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
251 }
252
253 nskb = skb_clone(skb_copy, GFP_ATOMIC);
254 if (!nskb)
255 continue;
256
257 if (sock_queue_rcv_skb(sk, nskb))
258 kfree_skb(nskb);
259 }
260
261 read_unlock(&hci_sk_list.lock);
262
263 kfree_skb(skb_copy);
264 }
265
266 /* Send frame to sockets with specific channel */
267 static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
268 int flag, struct sock *skip_sk)
269 {
270 struct sock *sk;
271
272 BT_DBG("channel %u len %d", channel, skb->len);
273
274 sk_for_each(sk, &hci_sk_list.head) {
275 struct sk_buff *nskb;
276
277 /* Ignore socket without the flag set */
278 if (!hci_sock_test_flag(sk, flag))
279 continue;
280
281 /* Skip the original socket */
282 if (sk == skip_sk)
283 continue;
284
285 if (sk->sk_state != BT_BOUND)
286 continue;
287
288 if (hci_pi(sk)->channel != channel)
289 continue;
290
291 nskb = skb_clone(skb, GFP_ATOMIC);
292 if (!nskb)
293 continue;
294
295 if (sock_queue_rcv_skb(sk, nskb))
296 kfree_skb(nskb);
297 }
298
299 }
300
301 void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
302 int flag, struct sock *skip_sk)
303 {
304 read_lock(&hci_sk_list.lock);
305 __hci_send_to_channel(channel, skb, flag, skip_sk);
306 read_unlock(&hci_sk_list.lock);
307 }
308
309 /* Send frame to monitor socket */
310 void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
311 {
312 struct sk_buff *skb_copy = NULL;
313 struct hci_mon_hdr *hdr;
314 __le16 opcode;
315
316 if (!atomic_read(&monitor_promisc))
317 return;
318
319 BT_DBG("hdev %p len %d", hdev, skb->len);
320
321 switch (hci_skb_pkt_type(skb)) {
322 case HCI_COMMAND_PKT:
323 opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
324 break;
325 case HCI_EVENT_PKT:
326 opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
327 break;
328 case HCI_ACLDATA_PKT:
329 if (bt_cb(skb)->incoming)
330 opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
331 else
332 opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
333 break;
334 case HCI_SCODATA_PKT:
335 if (bt_cb(skb)->incoming)
336 opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
337 else
338 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
339 break;
340 case HCI_ISODATA_PKT:
341 if (bt_cb(skb)->incoming)
342 opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
343 else
344 opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
345 break;
346 case HCI_DIAG_PKT:
347 opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
348 break;
349 default:
350 return;
351 }
352
353 /* Create a private copy with headroom */
354 skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
355 if (!skb_copy)
356 return;
357
358 /* Put header before the data */
359 hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
360 hdr->opcode = opcode;
361 hdr->index = cpu_to_le16(hdev->id);
362 hdr->len = cpu_to_le16(skb->len);
363
364 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
365 HCI_SOCK_TRUSTED, NULL);
366 kfree_skb(skb_copy);
367 }
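
/* What a monitor reader (btmon, for instance) then sees per queued frame is
 * struct hci_mon_hdr followed by the payload, with all multi-byte fields
 * little endian. A minimal userspace parse sketch (byte-order helpers used
 * loosely, as an assumption):
 *
 *	struct hci_mon_hdr hdr;
 *
 *	memcpy(&hdr, buf, HCI_MON_HDR_SIZE);
 *	opcode  = le16_to_cpu(hdr.opcode);	which HCI_MON_* record this is
 *	index   = le16_to_cpu(hdr.index);	controller id, or HCI_DEV_NONE
 *	payload = buf + HCI_MON_HDR_SIZE;	le16_to_cpu(hdr.len) bytes long
 */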
368
369 void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
370 void *data, u16 data_len, ktime_t tstamp,
371 int flag, struct sock *skip_sk)
372 {
373 struct sock *sk;
374 __le16 index;
375
376 if (hdev)
377 index = cpu_to_le16(hdev->id);
378 else
379 index = cpu_to_le16(MGMT_INDEX_NONE);
380
381 read_lock(&hci_sk_list.lock);
382
383 sk_for_each(sk, &hci_sk_list.head) {
384 struct hci_mon_hdr *hdr;
385 struct sk_buff *skb;
386
387 if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
388 continue;
389
390 /* Ignore socket without the flag set */
391 if (!hci_sock_test_flag(sk, flag))
392 continue;
393
394 /* Skip the original socket */
395 if (sk == skip_sk)
396 continue;
397
398 skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
399 if (!skb)
400 continue;
401
402 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
403 put_unaligned_le16(event, skb_put(skb, 2));
404
405 if (data)
406 skb_put_data(skb, data, data_len);
407
408 skb->tstamp = tstamp;
409
410 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
411 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
412 hdr->index = index;
413 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
414
415 __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
416 HCI_SOCK_TRUSTED, NULL);
417 kfree_skb(skb);
418 }
419
420 read_unlock(&hci_sk_list.lock);
421 }
422
423 static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
424 {
425 struct hci_mon_hdr *hdr;
426 struct hci_mon_new_index *ni;
427 struct hci_mon_index_info *ii;
428 struct sk_buff *skb;
429 __le16 opcode;
430
431 switch (event) {
432 case HCI_DEV_REG:
433 skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
434 if (!skb)
435 return NULL;
436
437 ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
438 ni->type = hdev->dev_type;
439 ni->bus = hdev->bus;
440 bacpy(&ni->bdaddr, &hdev->bdaddr);
441 memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
442 strnlen(hdev->name, sizeof(ni->name)), '\0');
443
444 opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
445 break;
446
447 case HCI_DEV_UNREG:
448 skb = bt_skb_alloc(0, GFP_ATOMIC);
449 if (!skb)
450 return NULL;
451
452 opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
453 break;
454
455 case HCI_DEV_SETUP:
456 if (hdev->manufacturer == 0xffff)
457 return NULL;
458 fallthrough;
459
460 case HCI_DEV_UP:
461 skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
462 if (!skb)
463 return NULL;
464
465 ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
466 bacpy(&ii->bdaddr, &hdev->bdaddr);
467 ii->manufacturer = cpu_to_le16(hdev->manufacturer);
468
469 opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
470 break;
471
472 case HCI_DEV_OPEN:
473 skb = bt_skb_alloc(0, GFP_ATOMIC);
474 if (!skb)
475 return NULL;
476
477 opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
478 break;
479
480 case HCI_DEV_CLOSE:
481 skb = bt_skb_alloc(0, GFP_ATOMIC);
482 if (!skb)
483 return NULL;
484
485 opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
486 break;
487
488 default:
489 return NULL;
490 }
491
492 __net_timestamp(skb);
493
494 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
495 hdr->opcode = opcode;
496 hdr->index = cpu_to_le16(hdev->id);
497 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
498
499 return skb;
500 }
501
502 static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
503 {
504 struct hci_mon_hdr *hdr;
505 struct sk_buff *skb;
506 u16 format;
507 u8 ver[3];
508 u32 flags;
509
510 /* No message needed when cookie is not present */
511 if (!hci_pi(sk)->cookie)
512 return NULL;
513
514 switch (hci_pi(sk)->channel) {
515 case HCI_CHANNEL_RAW:
516 format = 0x0000;
517 ver[0] = BT_SUBSYS_VERSION;
518 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
519 break;
520 case HCI_CHANNEL_USER:
521 format = 0x0001;
522 ver[0] = BT_SUBSYS_VERSION;
523 put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
524 break;
525 case HCI_CHANNEL_CONTROL:
526 format = 0x0002;
527 mgmt_fill_version_info(ver);
528 break;
529 default:
530 /* No message for unsupported format */
531 return NULL;
532 }
533
534 skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
535 if (!skb)
536 return NULL;
537
538 flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;
539
540 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
541 put_unaligned_le16(format, skb_put(skb, 2));
542 skb_put_data(skb, ver, sizeof(ver));
543 put_unaligned_le32(flags, skb_put(skb, 4));
544 skb_put_u8(skb, TASK_COMM_LEN);
545 skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);
546
547 __net_timestamp(skb);
548
549 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
550 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
551 if (hci_pi(sk)->hdev)
552 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
553 else
554 hdr->index = cpu_to_le16(HCI_DEV_NONE);
555 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
556
557 return skb;
558 }
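
/* For reference, the CTRL_OPEN payload built above is laid out as:
 *
 *	cookie (4, LE) | format (2, LE) | version (3) | flags (4, LE) |
 *	comm length (1) | comm (TASK_COMM_LEN bytes)
 */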
559
560 static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
561 {
562 struct hci_mon_hdr *hdr;
563 struct sk_buff *skb;
564
565 /* No message needed when cookie is not present */
566 if (!hci_pi(sk)->cookie)
567 return NULL;
568
569 switch (hci_pi(sk)->channel) {
570 case HCI_CHANNEL_RAW:
571 case HCI_CHANNEL_USER:
572 case HCI_CHANNEL_CONTROL:
573 break;
574 default:
575 /* No message for unsupported format */
576 return NULL;
577 }
578
579 skb = bt_skb_alloc(4, GFP_ATOMIC);
580 if (!skb)
581 return NULL;
582
583 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
584
585 __net_timestamp(skb);
586
587 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
588 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
589 if (hci_pi(sk)->hdev)
590 hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
591 else
592 hdr->index = cpu_to_le16(HCI_DEV_NONE);
593 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
594
595 return skb;
596 }
597
598 static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
599 u16 opcode, u16 len,
600 const void *buf)
601 {
602 struct hci_mon_hdr *hdr;
603 struct sk_buff *skb;
604
605 skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
606 if (!skb)
607 return NULL;
608
609 put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
610 put_unaligned_le16(opcode, skb_put(skb, 2));
611
612 if (buf)
613 skb_put_data(skb, buf, len);
614
615 __net_timestamp(skb);
616
617 hdr = skb_push(skb, HCI_MON_HDR_SIZE);
618 hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
619 hdr->index = cpu_to_le16(index);
620 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
621
622 return skb;
623 }
624
625 static void __printf(2, 3)
626 send_monitor_note(struct sock *sk, const char *fmt, ...)
627 {
628 size_t len;
629 struct hci_mon_hdr *hdr;
630 struct sk_buff *skb;
631 va_list args;
632
633 va_start(args, fmt);
634 len = vsnprintf(NULL, 0, fmt, args);
635 va_end(args);
636
637 skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
638 if (!skb)
639 return;
640
641 va_start(args, fmt);
642 vsprintf(skb_put(skb, len), fmt, args);
643 *(u8 *)skb_put(skb, 1) = 0;
644 va_end(args);
645
646 __net_timestamp(skb);
647
648 hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
649 hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
650 hdr->index = cpu_to_le16(HCI_DEV_NONE);
651 hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);
652
653 if (sock_queue_rcv_skb(sk, skb))
654 kfree_skb(skb);
655 }
656
657 static void send_monitor_replay(struct sock *sk)
658 {
659 struct hci_dev *hdev;
660
661 read_lock(&hci_dev_list_lock);
662
663 list_for_each_entry(hdev, &hci_dev_list, list) {
664 struct sk_buff *skb;
665
666 skb = create_monitor_event(hdev, HCI_DEV_REG);
667 if (!skb)
668 continue;
669
670 if (sock_queue_rcv_skb(sk, skb))
671 kfree_skb(skb);
672
673 if (!test_bit(HCI_RUNNING, &hdev->flags))
674 continue;
675
676 skb = create_monitor_event(hdev, HCI_DEV_OPEN);
677 if (!skb)
678 continue;
679
680 if (sock_queue_rcv_skb(sk, skb))
681 kfree_skb(skb);
682
683 if (test_bit(HCI_UP, &hdev->flags))
684 skb = create_monitor_event(hdev, HCI_DEV_UP);
685 else if (hci_dev_test_flag(hdev, HCI_SETUP))
686 skb = create_monitor_event(hdev, HCI_DEV_SETUP);
687 else
688 skb = NULL;
689
690 if (skb) {
691 if (sock_queue_rcv_skb(sk, skb))
692 kfree_skb(skb);
693 }
694 }
695
696 read_unlock(&hci_dev_list_lock);
697 }
698
699 static void send_monitor_control_replay(struct sock *mon_sk)
700 {
701 struct sock *sk;
702
703 read_lock(&hci_sk_list.lock);
704
705 sk_for_each(sk, &hci_sk_list.head) {
706 struct sk_buff *skb;
707
708 skb = create_monitor_ctrl_open(sk);
709 if (!skb)
710 continue;
711
712 if (sock_queue_rcv_skb(mon_sk, skb))
713 kfree_skb(skb);
714 }
715
716 read_unlock(&hci_sk_list.lock);
717 }
718
719 /* Generate internal stack event */
720 static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
721 {
722 struct hci_event_hdr *hdr;
723 struct hci_ev_stack_internal *ev;
724 struct sk_buff *skb;
725
726 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
727 if (!skb)
728 return;
729
730 hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
731 hdr->evt = HCI_EV_STACK_INTERNAL;
732 hdr->plen = sizeof(*ev) + dlen;
733
734 ev = skb_put(skb, sizeof(*ev) + dlen);
735 ev->type = type;
736 memcpy(ev->data, data, dlen);
737
738 bt_cb(skb)->incoming = 1;
739 __net_timestamp(skb);
740
741 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
742 hci_send_to_sock(hdev, skb);
743 kfree_skb(skb);
744 }
745
746 void hci_sock_dev_event(struct hci_dev *hdev, int event)
747 {
748 BT_DBG("hdev %s event %d", hdev->name, event);
749
750 if (atomic_read(&monitor_promisc)) {
751 struct sk_buff *skb;
752
753 /* Send event to monitor */
754 skb = create_monitor_event(hdev, event);
755 if (skb) {
756 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
757 HCI_SOCK_TRUSTED, NULL);
758 kfree_skb(skb);
759 }
760 }
761
762 if (event <= HCI_DEV_DOWN) {
763 struct hci_ev_si_device ev;
764
765 /* Send event to sockets */
766 ev.event = event;
767 ev.dev_id = hdev->id;
768 hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
769 }
770
771 if (event == HCI_DEV_UNREG) {
772 struct sock *sk;
773
774 /* Wake up sockets using this dead device */
775 read_lock(&hci_sk_list.lock);
776 sk_for_each(sk, &hci_sk_list.head) {
777 if (hci_pi(sk)->hdev == hdev) {
778 sk->sk_err = EPIPE;
779 sk->sk_state_change(sk);
780 }
781 }
782 read_unlock(&hci_sk_list.lock);
783 }
784 }
785
786 static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
787 {
788 struct hci_mgmt_chan *c;
789
790 list_for_each_entry(c, &mgmt_chan_list, list) {
791 if (c->channel == channel)
792 return c;
793 }
794
795 return NULL;
796 }
797
798 static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
799 {
800 struct hci_mgmt_chan *c;
801
802 mutex_lock(&mgmt_chan_list_lock);
803 c = __hci_mgmt_chan_find(channel);
804 mutex_unlock(&mgmt_chan_list_lock);
805
806 return c;
807 }
808
809 int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
810 {
811 if (c->channel < HCI_CHANNEL_CONTROL)
812 return -EINVAL;
813
814 mutex_lock(&mgmt_chan_list_lock);
815 if (__hci_mgmt_chan_find(c->channel)) {
816 mutex_unlock(&mgmt_chan_list_lock);
817 return -EALREADY;
818 }
819
820 list_add_tail(&c->list, &mgmt_chan_list);
821
822 mutex_unlock(&mgmt_chan_list_lock);
823
824 return 0;
825 }
826 EXPORT_SYMBOL(hci_mgmt_chan_register);
827
828 void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
829 {
830 mutex_lock(&mgmt_chan_list_lock);
831 list_del(&c->list);
832 mutex_unlock(&mgmt_chan_list_lock);
833 }
834 EXPORT_SYMBOL(hci_mgmt_chan_unregister);
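
/* Sketch of a channel provider (hypothetical names; mgmt.c registers
 * HCI_CHANNEL_CONTROL along these lines). The fields shown are the ones
 * this file relies on in hci_mgmt_cmd() below:
 *
 *	static struct hci_mgmt_chan my_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(my_handlers),
 *		.handlers	= my_handlers,
 *		.hdev_init	= my_hdev_init,
 *	};
 *
 *	err = hci_mgmt_chan_register(&my_chan);
 *	...
 *	hci_mgmt_chan_unregister(&my_chan);
 */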
835
836 static int hci_sock_release(struct socket *sock)
837 {
838 struct sock *sk = sock->sk;
839 struct hci_dev *hdev;
840 struct sk_buff *skb;
841
842 BT_DBG("sock %p sk %p", sock, sk);
843
844 if (!sk)
845 return 0;
846
847 lock_sock(sk);
848
849 switch (hci_pi(sk)->channel) {
850 case HCI_CHANNEL_MONITOR:
851 atomic_dec(&monitor_promisc);
852 break;
853 case HCI_CHANNEL_RAW:
854 case HCI_CHANNEL_USER:
855 case HCI_CHANNEL_CONTROL:
856 /* Send event to monitor */
857 skb = create_monitor_ctrl_close(sk);
858 if (skb) {
859 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
860 HCI_SOCK_TRUSTED, NULL);
861 kfree_skb(skb);
862 }
863
864 hci_sock_free_cookie(sk);
865 break;
866 }
867
868 bt_sock_unlink(&hci_sk_list, sk);
869
870 hdev = hci_pi(sk)->hdev;
871 if (hdev) {
872 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
873 /* When releasing a user channel exclusive access,
874 * call hci_dev_do_close directly instead of calling
875 * hci_dev_close to ensure the exclusive access will
876 * be released and the controller brought back down.
877 *
878 * The checking of HCI_AUTO_OFF is not needed in this
879 * case since it will have been cleared already when
880 * opening the user channel.
881 */
882 hci_dev_do_close(hdev);
883 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
884 mgmt_index_added(hdev);
885 }
886
887 atomic_dec(&hdev->promisc);
888 hci_dev_put(hdev);
889 }
890
891 sock_orphan(sk);
892 release_sock(sk);
893 sock_put(sk);
894 return 0;
895 }
896
897 static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
898 {
899 bdaddr_t bdaddr;
900 int err;
901
902 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
903 return -EFAULT;
904
905 hci_dev_lock(hdev);
906
907 err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
908
909 hci_dev_unlock(hdev);
910
911 return err;
912 }
913
914 static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
915 {
916 bdaddr_t bdaddr;
917 int err;
918
919 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
920 return -EFAULT;
921
922 hci_dev_lock(hdev);
923
924 err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);
925
926 hci_dev_unlock(hdev);
927
928 return err;
929 }
930
931 /* Ioctls that require bound socket */
932 static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
933 unsigned long arg)
934 {
935 struct hci_dev *hdev = hci_hdev_from_sock(sk);
936
937 if (IS_ERR(hdev))
938 return PTR_ERR(hdev);
939
940 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
941 return -EBUSY;
942
943 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
944 return -EOPNOTSUPP;
945
946 if (hdev->dev_type != HCI_PRIMARY)
947 return -EOPNOTSUPP;
948
949 switch (cmd) {
950 case HCISETRAW:
951 if (!capable(CAP_NET_ADMIN))
952 return -EPERM;
953 return -EOPNOTSUPP;
954
955 case HCIGETCONNINFO:
956 return hci_get_conn_info(hdev, (void __user *)arg);
957
958 case HCIGETAUTHINFO:
959 return hci_get_auth_info(hdev, (void __user *)arg);
960
961 case HCIBLOCKADDR:
962 if (!capable(CAP_NET_ADMIN))
963 return -EPERM;
964 return hci_sock_reject_list_add(hdev, (void __user *)arg);
965
966 case HCIUNBLOCKADDR:
967 if (!capable(CAP_NET_ADMIN))
968 return -EPERM;
969 return hci_sock_reject_list_del(hdev, (void __user *)arg);
970 }
971
972 return -ENOIOCTLCMD;
973 }
974
975 static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
976 unsigned long arg)
977 {
978 void __user *argp = (void __user *)arg;
979 struct sock *sk = sock->sk;
980 int err;
981
982 BT_DBG("cmd %x arg %lx", cmd, arg);
983
984 /* Make sure the cmd is valid before doing anything */
985 switch (cmd) {
986 case HCIGETDEVLIST:
987 case HCIGETDEVINFO:
988 case HCIGETCONNLIST:
989 case HCIDEVUP:
990 case HCIDEVDOWN:
991 case HCIDEVRESET:
992 case HCIDEVRESTAT:
993 case HCISETSCAN:
994 case HCISETAUTH:
995 case HCISETENCRYPT:
996 case HCISETPTYPE:
997 case HCISETLINKPOL:
998 case HCISETLINKMODE:
999 case HCISETACLMTU:
1000 case HCISETSCOMTU:
1001 case HCIINQUIRY:
1002 case HCISETRAW:
1003 case HCIGETCONNINFO:
1004 case HCIGETAUTHINFO:
1005 case HCIBLOCKADDR:
1006 case HCIUNBLOCKADDR:
1007 break;
1008 default:
1009 return -ENOIOCTLCMD;
1010 }
1011
1012 lock_sock(sk);
1013
1014 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1015 err = -EBADFD;
1016 goto done;
1017 }
1018
1019 /* When calling an ioctl on an unbound raw socket, ensure that
1020 * the monitor gets informed. Ensure that the resulting event is
1021 * only sent once by checking whether the cookie exists. The
1022 * socket cookie will only ever be generated once for the lifetime
1023 * of a given socket.
1024 */
1025 if (hci_sock_gen_cookie(sk)) {
1026 struct sk_buff *skb;
1027
1028 /* Perform careful checks before setting the HCI_SOCK_TRUSTED
1029 * flag. Make sure that not only the current task but also
1030 * the socket opener has the required capability, since
1031 * privileged programs can be tricked into making ioctl calls
1032 * on HCI sockets, and the socket should not be marked as
1033 * trusted simply because the ioctl caller is privileged.
1034 */
1035 if (sk_capable(sk, CAP_NET_ADMIN))
1036 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1037
1038 /* Send event to monitor */
1039 skb = create_monitor_ctrl_open(sk);
1040 if (skb) {
1041 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1042 HCI_SOCK_TRUSTED, NULL);
1043 kfree_skb(skb);
1044 }
1045 }
1046
1047 release_sock(sk);
1048
1049 switch (cmd) {
1050 case HCIGETDEVLIST:
1051 return hci_get_dev_list(argp);
1052
1053 case HCIGETDEVINFO:
1054 return hci_get_dev_info(argp);
1055
1056 case HCIGETCONNLIST:
1057 return hci_get_conn_list(argp);
1058
1059 case HCIDEVUP:
1060 if (!capable(CAP_NET_ADMIN))
1061 return -EPERM;
1062 return hci_dev_open(arg);
1063
1064 case HCIDEVDOWN:
1065 if (!capable(CAP_NET_ADMIN))
1066 return -EPERM;
1067 return hci_dev_close(arg);
1068
1069 case HCIDEVRESET:
1070 if (!capable(CAP_NET_ADMIN))
1071 return -EPERM;
1072 return hci_dev_reset(arg);
1073
1074 case HCIDEVRESTAT:
1075 if (!capable(CAP_NET_ADMIN))
1076 return -EPERM;
1077 return hci_dev_reset_stat(arg);
1078
1079 case HCISETSCAN:
1080 case HCISETAUTH:
1081 case HCISETENCRYPT:
1082 case HCISETPTYPE:
1083 case HCISETLINKPOL:
1084 case HCISETLINKMODE:
1085 case HCISETACLMTU:
1086 case HCISETSCOMTU:
1087 if (!capable(CAP_NET_ADMIN))
1088 return -EPERM;
1089 return hci_dev_cmd(cmd, argp);
1090
1091 case HCIINQUIRY:
1092 return hci_inquiry(argp);
1093 }
1094
1095 lock_sock(sk);
1096
1097 err = hci_sock_bound_ioctl(sk, cmd, arg);
1098
1099 done:
1100 release_sock(sk);
1101 return err;
1102 }
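
/* Userspace sketch (an assumption, not part of this file): the classic
 * hciconfig-style "bring hci0 up" maps onto this handler as:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(fd, HCIDEVUP, 0);		0 is the device id of hci0
 *
 * which ends up in hci_dev_open() above after the CAP_NET_ADMIN check.
 */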
1103
1104 #ifdef CONFIG_COMPAT
1105 static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
1106 unsigned long arg)
1107 {
1108 switch (cmd) {
1109 case HCIDEVUP:
1110 case HCIDEVDOWN:
1111 case HCIDEVRESET:
1112 case HCIDEVRESTAT:
1113 return hci_sock_ioctl(sock, cmd, arg);
1114 }
1115
1116 return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
1117 }
1118 #endif
1119
1120 static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
1121 int addr_len)
1122 {
1123 struct sockaddr_hci haddr;
1124 struct sock *sk = sock->sk;
1125 struct hci_dev *hdev = NULL;
1126 struct sk_buff *skb;
1127 int len, err = 0;
1128
1129 BT_DBG("sock %p sk %p", sock, sk);
1130
1131 if (!addr)
1132 return -EINVAL;
1133
1134 memset(&haddr, 0, sizeof(haddr));
1135 len = min_t(unsigned int, sizeof(haddr), addr_len);
1136 memcpy(&haddr, addr, len);
1137
1138 if (haddr.hci_family != AF_BLUETOOTH)
1139 return -EINVAL;
1140
1141 lock_sock(sk);
1142
1143 /* Allow detaching from a dead device and attaching to an alive device,
1144 * if the caller wants to re-bind (instead of close) this socket in
1145 * response to the hci_sock_dev_event(HCI_DEV_UNREG) notification.
1146 */
1147 hdev = hci_pi(sk)->hdev;
1148 if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1149 hci_pi(sk)->hdev = NULL;
1150 sk->sk_state = BT_OPEN;
1151 hci_dev_put(hdev);
1152 }
1153 hdev = NULL;
1154
1155 if (sk->sk_state == BT_BOUND) {
1156 err = -EALREADY;
1157 goto done;
1158 }
1159
1160 switch (haddr.hci_channel) {
1161 case HCI_CHANNEL_RAW:
1162 if (hci_pi(sk)->hdev) {
1163 err = -EALREADY;
1164 goto done;
1165 }
1166
1167 if (haddr.hci_dev != HCI_DEV_NONE) {
1168 hdev = hci_dev_get(haddr.hci_dev);
1169 if (!hdev) {
1170 err = -ENODEV;
1171 goto done;
1172 }
1173
1174 atomic_inc(&hdev->promisc);
1175 }
1176
1177 hci_pi(sk)->channel = haddr.hci_channel;
1178
1179 if (!hci_sock_gen_cookie(sk)) {
1180 /* If a cookie has already been assigned, then an ioctl
1181 * has already been issued against the unbound socket and
1182 * with that triggered an open notification. Send a close
1183 * notification first to allow the state transition to
1184 * bound.
1185 */
1186 skb = create_monitor_ctrl_close(sk);
1187 if (skb) {
1188 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1189 HCI_SOCK_TRUSTED, NULL);
1190 kfree_skb(skb);
1191 }
1192 }
1193
1194 if (capable(CAP_NET_ADMIN))
1195 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1196
1197 hci_pi(sk)->hdev = hdev;
1198
1199 /* Send event to monitor */
1200 skb = create_monitor_ctrl_open(sk);
1201 if (skb) {
1202 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1203 HCI_SOCK_TRUSTED, NULL);
1204 kfree_skb(skb);
1205 }
1206 break;
1207
1208 case HCI_CHANNEL_USER:
1209 if (hci_pi(sk)->hdev) {
1210 err = -EALREADY;
1211 goto done;
1212 }
1213
1214 if (haddr.hci_dev == HCI_DEV_NONE) {
1215 err = -EINVAL;
1216 goto done;
1217 }
1218
1219 if (!capable(CAP_NET_ADMIN)) {
1220 err = -EPERM;
1221 goto done;
1222 }
1223
1224 hdev = hci_dev_get(haddr.hci_dev);
1225 if (!hdev) {
1226 err = -ENODEV;
1227 goto done;
1228 }
1229
1230 if (test_bit(HCI_INIT, &hdev->flags) ||
1231 hci_dev_test_flag(hdev, HCI_SETUP) ||
1232 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1233 (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1234 test_bit(HCI_UP, &hdev->flags))) {
1235 err = -EBUSY;
1236 hci_dev_put(hdev);
1237 goto done;
1238 }
1239
1240 if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
1241 err = -EUSERS;
1242 hci_dev_put(hdev);
1243 goto done;
1244 }
1245
1246 mgmt_index_removed(hdev);
1247
1248 err = hci_dev_open(hdev->id);
1249 if (err) {
1250 if (err == -EALREADY) {
1251 /* In case the transport is already up and
1252 * running, clear the error here.
1253 *
1254 * This can happen when opening a user
1255 * channel and HCI_AUTO_OFF grace period
1256 * is still active.
1257 */
1258 err = 0;
1259 } else {
1260 hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
1261 mgmt_index_added(hdev);
1262 hci_dev_put(hdev);
1263 goto done;
1264 }
1265 }
1266
1267 hci_pi(sk)->channel = haddr.hci_channel;
1268
1269 if (!hci_sock_gen_cookie(sk)) {
1270 /* In the case when a cookie has already been assigned,
1271 * this socket will transition from a raw socket into
1272 * a user channel socket. For a clean transition, send
1273 * the close notification first.
1274 */
1275 skb = create_monitor_ctrl_close(sk);
1276 if (skb) {
1277 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1278 HCI_SOCK_TRUSTED, NULL);
1279 kfree_skb(skb);
1280 }
1281 }
1282
1283 /* The user channel is restricted to CAP_NET_ADMIN
1284 * capabilities and with that implicitly trusted.
1285 */
1286 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1287
1288 hci_pi(sk)->hdev = hdev;
1289
1290 /* Send event to monitor */
1291 skb = create_monitor_ctrl_open(sk);
1292 if (skb) {
1293 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1294 HCI_SOCK_TRUSTED, NULL);
1295 kfree_skb(skb);
1296 }
1297
1298 atomic_inc(&hdev->promisc);
1299 break;
1300
1301 case HCI_CHANNEL_MONITOR:
1302 if (haddr.hci_dev != HCI_DEV_NONE) {
1303 err = -EINVAL;
1304 goto done;
1305 }
1306
1307 if (!capable(CAP_NET_RAW)) {
1308 err = -EPERM;
1309 goto done;
1310 }
1311
1312 hci_pi(sk)->channel = haddr.hci_channel;
1313
1314 /* The monitor interface is restricted to CAP_NET_RAW
1315 * capabilities and with that implicitly trusted.
1316 */
1317 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1318
1319 send_monitor_note(sk, "Linux version %s (%s)",
1320 init_utsname()->release,
1321 init_utsname()->machine);
1322 send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
1323 BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
1324 send_monitor_replay(sk);
1325 send_monitor_control_replay(sk);
1326
1327 atomic_inc(&monitor_promisc);
1328 break;
1329
1330 case HCI_CHANNEL_LOGGING:
1331 if (haddr.hci_dev != HCI_DEV_NONE) {
1332 err = -EINVAL;
1333 goto done;
1334 }
1335
1336 if (!capable(CAP_NET_ADMIN)) {
1337 err = -EPERM;
1338 goto done;
1339 }
1340
1341 hci_pi(sk)->channel = haddr.hci_channel;
1342 break;
1343
1344 default:
1345 if (!hci_mgmt_chan_find(haddr.hci_channel)) {
1346 err = -EINVAL;
1347 goto done;
1348 }
1349
1350 if (haddr.hci_dev != HCI_DEV_NONE) {
1351 err = -EINVAL;
1352 goto done;
1353 }
1354
1355 /* Users with CAP_NET_ADMIN capabilities are allowed
1356 * access to all management commands and events. For
1357 * untrusted users the interface is restricted and
1358 * also only untrusted events are sent.
1359 */
1360 if (capable(CAP_NET_ADMIN))
1361 hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);
1362
1363 hci_pi(sk)->channel = haddr.hci_channel;
1364
1365 /* At the moment the index and unconfigured index events
1366 * are enabled unconditionally. Setting them on each
1367 * socket when binding keeps this functionality. They
1368 * however might be cleared later and then sending of these
1369 * events will be disabled, but that is then intentional.
1370 *
1371 * This also enables generic events that are safe to be
1372 * received by untrusted users. Example for such events
1373 * are changes to settings, class of device, name etc.
1374 */
1375 if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
1376 if (!hci_sock_gen_cookie(sk)) {
1377 /* In the case when a cookie has already been
1378 * assigned, this socket will transition from
1379 * a raw socket into a control socket. To
1380 * allow for a clean transition, send the
1381 * close notification first.
1382 */
1383 skb = create_monitor_ctrl_close(sk);
1384 if (skb) {
1385 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1386 HCI_SOCK_TRUSTED, NULL);
1387 kfree_skb(skb);
1388 }
1389 }
1390
1391 /* Send event to monitor */
1392 skb = create_monitor_ctrl_open(sk);
1393 if (skb) {
1394 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1395 HCI_SOCK_TRUSTED, NULL);
1396 kfree_skb(skb);
1397 }
1398
1399 hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
1400 hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
1401 hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
1402 hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
1403 hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1404 hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1405 }
1406 break;
1407 }
1408
1409 sk->sk_state = BT_BOUND;
1410
1411 done:
1412 release_sock(sk);
1413 return err;
1414 }
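
/* Userspace sketch (an assumption, not part of this file): binding a raw
 * HCI socket to adapter hci0, with sockaddr_hci fields named as used above:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci a = { 0 };
 *
 *	a.hci_family  = AF_BLUETOOTH;
 *	a.hci_dev     = 0;			hci0
 *	a.hci_channel = HCI_CHANNEL_RAW;	or _USER/_MONITOR/_CONTROL
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Note that HCI_CHANNEL_MONITOR and HCI_CHANNEL_CONTROL require hci_dev to
 * be HCI_DEV_NONE, as enforced above.
 */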
1415
1416 static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
1417 int peer)
1418 {
1419 struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
1420 struct sock *sk = sock->sk;
1421 struct hci_dev *hdev;
1422 int err = 0;
1423
1424 BT_DBG("sock %p sk %p", sock, sk);
1425
1426 if (peer)
1427 return -EOPNOTSUPP;
1428
1429 lock_sock(sk);
1430
1431 hdev = hci_hdev_from_sock(sk);
1432 if (IS_ERR(hdev)) {
1433 err = PTR_ERR(hdev);
1434 goto done;
1435 }
1436
1437 haddr->hci_family = AF_BLUETOOTH;
1438 haddr->hci_dev = hdev->id;
1439 haddr->hci_channel = hci_pi(sk)->channel;
1440 err = sizeof(*haddr);
1441
1442 done:
1443 release_sock(sk);
1444 return err;
1445 }
1446
1447 static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
1448 struct sk_buff *skb)
1449 {
1450 __u8 mask = hci_pi(sk)->cmsg_mask;
1451
1452 if (mask & HCI_CMSG_DIR) {
1453 int incoming = bt_cb(skb)->incoming;
1454 put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
1455 &incoming);
1456 }
1457
1458 if (mask & HCI_CMSG_TSTAMP) {
1459 #ifdef CONFIG_COMPAT
1460 struct old_timeval32 ctv;
1461 #endif
1462 struct __kernel_old_timeval tv;
1463 void *data;
1464 int len;
1465
1466 skb_get_timestamp(skb, &tv);
1467
1468 data = &tv;
1469 len = sizeof(tv);
1470 #ifdef CONFIG_COMPAT
1471 if (!COMPAT_USE_64BIT_TIME &&
1472 (msg->msg_flags & MSG_CMSG_COMPAT)) {
1473 ctv.tv_sec = tv.tv_sec;
1474 ctv.tv_usec = tv.tv_usec;
1475 data = &ctv;
1476 len = sizeof(ctv);
1477 }
1478 #endif
1479
1480 put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
1481 }
1482 }
1483
1484 static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
1485 size_t len, int flags)
1486 {
1487 int noblock = flags & MSG_DONTWAIT;
1488 struct sock *sk = sock->sk;
1489 struct sk_buff *skb;
1490 int copied, err;
1491 unsigned int skblen;
1492
1493 BT_DBG("sock %p, sk %p", sock, sk);
1494
1495 if (flags & MSG_OOB)
1496 return -EOPNOTSUPP;
1497
1498 if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
1499 return -EOPNOTSUPP;
1500
1501 if (sk->sk_state == BT_CLOSED)
1502 return 0;
1503
1504 skb = skb_recv_datagram(sk, flags, noblock, &err);
1505 if (!skb)
1506 return err;
1507
1508 skblen = skb->len;
1509 copied = skb->len;
1510 if (len < copied) {
1511 msg->msg_flags |= MSG_TRUNC;
1512 copied = len;
1513 }
1514
1515 skb_reset_transport_header(skb);
1516 err = skb_copy_datagram_msg(skb, 0, msg, copied);
1517
1518 switch (hci_pi(sk)->channel) {
1519 case HCI_CHANNEL_RAW:
1520 hci_sock_cmsg(sk, msg, skb);
1521 break;
1522 case HCI_CHANNEL_USER:
1523 case HCI_CHANNEL_MONITOR:
1524 sock_recv_timestamp(msg, sk, skb);
1525 break;
1526 default:
1527 if (hci_mgmt_chan_find(hci_pi(sk)->channel))
1528 sock_recv_timestamp(msg, sk, skb);
1529 break;
1530 }
1531
1532 skb_free_datagram(sk, skb);
1533
1534 if (flags & MSG_TRUNC)
1535 copied = skblen;
1536
1537 return err ? : copied;
1538 }
1539
1540 static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
1541 struct msghdr *msg, size_t msglen)
1542 {
1543 void *buf;
1544 u8 *cp;
1545 struct mgmt_hdr *hdr;
1546 u16 opcode, index, len;
1547 struct hci_dev *hdev = NULL;
1548 const struct hci_mgmt_handler *handler;
1549 bool var_len, no_hdev;
1550 int err;
1551
1552 BT_DBG("got %zu bytes", msglen);
1553
1554 if (msglen < sizeof(*hdr))
1555 return -EINVAL;
1556
1557 buf = kmalloc(msglen, GFP_KERNEL);
1558 if (!buf)
1559 return -ENOMEM;
1560
1561 if (memcpy_from_msg(buf, msg, msglen)) {
1562 err = -EFAULT;
1563 goto done;
1564 }
1565
1566 hdr = buf;
1567 opcode = __le16_to_cpu(hdr->opcode);
1568 index = __le16_to_cpu(hdr->index);
1569 len = __le16_to_cpu(hdr->len);
1570
1571 if (len != msglen - sizeof(*hdr)) {
1572 err = -EINVAL;
1573 goto done;
1574 }
1575
1576 if (chan->channel == HCI_CHANNEL_CONTROL) {
1577 struct sk_buff *skb;
1578
1579 /* Send event to monitor */
1580 skb = create_monitor_ctrl_command(sk, index, opcode, len,
1581 buf + sizeof(*hdr));
1582 if (skb) {
1583 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
1584 HCI_SOCK_TRUSTED, NULL);
1585 kfree_skb(skb);
1586 }
1587 }
1588
1589 if (opcode >= chan->handler_count ||
1590 chan->handlers[opcode].func == NULL) {
1591 BT_DBG("Unknown op %u", opcode);
1592 err = mgmt_cmd_status(sk, index, opcode,
1593 MGMT_STATUS_UNKNOWN_COMMAND);
1594 goto done;
1595 }
1596
1597 handler = &chan->handlers[opcode];
1598
1599 if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
1600 !(handler->flags & HCI_MGMT_UNTRUSTED)) {
1601 err = mgmt_cmd_status(sk, index, opcode,
1602 MGMT_STATUS_PERMISSION_DENIED);
1603 goto done;
1604 }
1605
1606 if (index != MGMT_INDEX_NONE) {
1607 hdev = hci_dev_get(index);
1608 if (!hdev) {
1609 err = mgmt_cmd_status(sk, index, opcode,
1610 MGMT_STATUS_INVALID_INDEX);
1611 goto done;
1612 }
1613
1614 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1615 hci_dev_test_flag(hdev, HCI_CONFIG) ||
1616 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1617 err = mgmt_cmd_status(sk, index, opcode,
1618 MGMT_STATUS_INVALID_INDEX);
1619 goto done;
1620 }
1621
1622 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1623 !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
1624 err = mgmt_cmd_status(sk, index, opcode,
1625 MGMT_STATUS_INVALID_INDEX);
1626 goto done;
1627 }
1628 }
1629
1630 if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
1631 no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
1632 if (no_hdev != !hdev) {
1633 err = mgmt_cmd_status(sk, index, opcode,
1634 MGMT_STATUS_INVALID_INDEX);
1635 goto done;
1636 }
1637 }
1638
1639 var_len = (handler->flags & HCI_MGMT_VAR_LEN);
1640 if ((var_len && len < handler->data_len) ||
1641 (!var_len && len != handler->data_len)) {
1642 err = mgmt_cmd_status(sk, index, opcode,
1643 MGMT_STATUS_INVALID_PARAMS);
1644 goto done;
1645 }
1646
1647 if (hdev && chan->hdev_init)
1648 chan->hdev_init(sk, hdev);
1649
1650 cp = buf + sizeof(*hdr);
1651
1652 err = handler->func(sk, hdev, cp, len);
1653 if (err < 0)
1654 goto done;
1655
1656 err = msglen;
1657
1658 done:
1659 if (hdev)
1660 hci_dev_put(hdev);
1661
1662 kfree(buf);
1663 return err;
1664 }
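
/* Wire format consumed above (sketch): every control channel write() is a
 * little-endian struct mgmt_hdr { opcode, index, len } followed by exactly
 * len bytes of parameters. A parameterless command aimed at no particular
 * controller could be built in userspace roughly as (assuming the usual
 * mgmt.h definitions):
 *
 *	struct mgmt_hdr hdr = {
 *		.opcode = cpu_to_le16(MGMT_OP_READ_VERSION),
 *		.index  = cpu_to_le16(MGMT_INDEX_NONE),
 *		.len    = cpu_to_le16(0),
 *	};
 *
 *	write(fd, &hdr, sizeof(hdr));
 */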
1665
1666 static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
1667 {
1668 struct hci_mon_hdr *hdr;
1669 struct sk_buff *skb;
1670 struct hci_dev *hdev;
1671 u16 index;
1672 int err;
1673
1674 /* The logging frame consists at minimum of the standard header,
1675 * the priority byte, the ident length byte and at least one string
1676 * terminator NUL byte. Anything shorter is an invalid packet.
1677 */
1678 if (len < sizeof(*hdr) + 3)
1679 return -EINVAL;
1680
1681 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1682 if (!skb)
1683 return err;
1684
1685 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1686 err = -EFAULT;
1687 goto drop;
1688 }
1689
1690 hdr = (void *)skb->data;
1691
1692 if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
1693 err = -EINVAL;
1694 goto drop;
1695 }
1696
1697 if (__le16_to_cpu(hdr->opcode) == 0x0000) {
1698 __u8 priority = skb->data[sizeof(*hdr)];
1699 __u8 ident_len = skb->data[sizeof(*hdr) + 1];
1700
1701 /* Only priorities 0-7 are valid; any other value makes the
1702 * packet invalid.
1703 *
1704 * The priority byte is followed by an ident length byte and
1705 * the NUL terminated ident string. Check that the ident
1706 * length does not overflow the packet and that the ident
1707 * string itself is NUL terminated. If the ident length is
1708 * zero, the zero-valued length byte itself doubles as the
1709 * NUL terminator.
1710 *
1711 * The message follows the ident string (if present) and must
1712 * also be NUL terminated. Otherwise the packet is invalid.
1713 */
1714 if (priority > 7 || skb->data[len - 1] != 0x00 ||
1715 ident_len > len - sizeof(*hdr) - 3 ||
1716 skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
1717 err = -EINVAL;
1718 goto drop;
1719 }
1720 } else {
1721 err = -EINVAL;
1722 goto drop;
1723 }
1724
1725 index = __le16_to_cpu(hdr->index);
1726
1727 if (index != MGMT_INDEX_NONE) {
1728 hdev = hci_dev_get(index);
1729 if (!hdev) {
1730 err = -ENODEV;
1731 goto drop;
1732 }
1733 } else {
1734 hdev = NULL;
1735 }
1736
1737 hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);
1738
1739 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
1740 err = len;
1741
1742 if (hdev)
1743 hci_dev_put(hdev);
1744
1745 drop:
1746 kfree_skb(skb);
1747 return err;
1748 }
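
/* Frame layout accepted above, for reference:
 *
 *	struct hci_mon_hdr (opcode must be 0x0000) |
 *	priority (1 byte, 0-7) | ident length (1 byte) |
 *	ident (NUL terminated, may be empty) | message (NUL terminated)
 *
 * hdr->len must match the byte count after the header; anything else is
 * rejected with -EINVAL before the frame is forwarded to the monitor
 * channel as HCI_MON_USER_LOGGING.
 */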
1749
1750 static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1751 size_t len)
1752 {
1753 struct sock *sk = sock->sk;
1754 struct hci_mgmt_chan *chan;
1755 struct hci_dev *hdev;
1756 struct sk_buff *skb;
1757 int err;
1758
1759 BT_DBG("sock %p sk %p", sock, sk);
1760
1761 if (msg->msg_flags & MSG_OOB)
1762 return -EOPNOTSUPP;
1763
1764 if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
1765 MSG_CMSG_COMPAT))
1766 return -EINVAL;
1767
1768 if (len < 4 || len > HCI_MAX_FRAME_SIZE)
1769 return -EINVAL;
1770
1771 lock_sock(sk);
1772
1773 switch (hci_pi(sk)->channel) {
1774 case HCI_CHANNEL_RAW:
1775 case HCI_CHANNEL_USER:
1776 break;
1777 case HCI_CHANNEL_MONITOR:
1778 err = -EOPNOTSUPP;
1779 goto done;
1780 case HCI_CHANNEL_LOGGING:
1781 err = hci_logging_frame(sk, msg, len);
1782 goto done;
1783 default:
1784 mutex_lock(&mgmt_chan_list_lock);
1785 chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
1786 if (chan)
1787 err = hci_mgmt_cmd(chan, sk, msg, len);
1788 else
1789 err = -EINVAL;
1790
1791 mutex_unlock(&mgmt_chan_list_lock);
1792 goto done;
1793 }
1794
1795 hdev = hci_hdev_from_sock(sk);
1796 if (IS_ERR(hdev)) {
1797 err = PTR_ERR(hdev);
1798 goto done;
1799 }
1800
1801 if (!test_bit(HCI_UP, &hdev->flags)) {
1802 err = -ENETDOWN;
1803 goto done;
1804 }
1805
1806 skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
1807 if (!skb)
1808 goto done;
1809
1810 if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
1811 err = -EFAULT;
1812 goto drop;
1813 }
1814
1815 hci_skb_pkt_type(skb) = skb->data[0];
1816 skb_pull(skb, 1);
1817
1818 if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
1819 /* No permission check is needed for user channel
1820 * since that gets enforced when binding the socket.
1821 *
1822 * However check that the packet type is valid.
1823 */
1824 if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
1825 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1826 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1827 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1828 err = -EINVAL;
1829 goto drop;
1830 }
1831
1832 skb_queue_tail(&hdev->raw_q, skb);
1833 queue_work(hdev->workqueue, &hdev->tx_work);
1834 } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
1835 u16 opcode = get_unaligned_le16(skb->data);
1836 u16 ogf = hci_opcode_ogf(opcode);
1837 u16 ocf = hci_opcode_ocf(opcode);
1838
1839 if (((ogf > HCI_SFLT_MAX_OGF) ||
1840 !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
1841 &hci_sec_filter.ocf_mask[ogf])) &&
1842 !capable(CAP_NET_RAW)) {
1843 err = -EPERM;
1844 goto drop;
1845 }
1846
1847 /* Since the opcode has already been extracted here, store
1848 * a copy of the value for later use by the drivers.
1849 */
1850 hci_skb_opcode(skb) = opcode;
1851
1852 if (ogf == 0x3f) {
1853 skb_queue_tail(&hdev->raw_q, skb);
1854 queue_work(hdev->workqueue, &hdev->tx_work);
1855 } else {
1856 /* Stand-alone HCI commands must be flagged as
1857 * single-command requests.
1858 */
1859 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
1860
1861 skb_queue_tail(&hdev->cmd_q, skb);
1862 queue_work(hdev->workqueue, &hdev->cmd_work);
1863 }
1864 } else {
1865 if (!capable(CAP_NET_RAW)) {
1866 err = -EPERM;
1867 goto drop;
1868 }
1869
1870 if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
1871 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
1872 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
1873 err = -EINVAL;
1874 goto drop;
1875 }
1876
1877 skb_queue_tail(&hdev->raw_q, skb);
1878 queue_work(hdev->workqueue, &hdev->tx_work);
1879 }
1880
1881 err = len;
1882
1883 done:
1884 release_sock(sk);
1885 return err;
1886
1887 drop:
1888 kfree_skb(skb);
1889 goto done;
1890 }
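
/* Userspace sketch (an assumption, not part of this file): on the RAW and
 * USER channels the first byte of every written frame is the HCI packet
 * type byte that skb_pull() strips above. Sending HCI_Reset (OGF 0x03,
 * OCF 0x0003, opcode 0x0c03) would look roughly like:
 *
 *	uint8_t frame[4];
 *
 *	frame[0] = HCI_COMMAND_PKT;
 *	frame[1] = 0x03;	opcode LSB
 *	frame[2] = 0x0c;	opcode MSB
 *	frame[3] = 0x00;	parameter length
 *	write(fd, frame, sizeof(frame));
 *
 * On the RAW channel this particular opcode is not allowed by
 * hci_sec_filter, so it additionally requires CAP_NET_RAW.
 */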
1891
1892 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
1893 sockptr_t optval, unsigned int len)
1894 {
1895 struct hci_ufilter uf = { .opcode = 0 };
1896 struct sock *sk = sock->sk;
1897 int err = 0, opt = 0;
1898
1899 BT_DBG("sk %p, opt %d", sk, optname);
1900
1901 if (level != SOL_HCI)
1902 return -ENOPROTOOPT;
1903
1904 lock_sock(sk);
1905
1906 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1907 err = -EBADFD;
1908 goto done;
1909 }
1910
1911 switch (optname) {
1912 case HCI_DATA_DIR:
1913 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1914 err = -EFAULT;
1915 break;
1916 }
1917
1918 if (opt)
1919 hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
1920 else
1921 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
1922 break;
1923
1924 case HCI_TIME_STAMP:
1925 if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
1926 err = -EFAULT;
1927 break;
1928 }
1929
1930 if (opt)
1931 hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
1932 else
1933 hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
1934 break;
1935
1936 case HCI_FILTER:
1937 {
1938 struct hci_filter *f = &hci_pi(sk)->filter;
1939
1940 uf.type_mask = f->type_mask;
1941 uf.opcode = f->opcode;
1942 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1943 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1944 }
1945
1946 len = min_t(unsigned int, len, sizeof(uf));
1947 if (copy_from_sockptr(&uf, optval, len)) {
1948 err = -EFAULT;
1949 break;
1950 }
1951
1952 if (!capable(CAP_NET_RAW)) {
1953 uf.type_mask &= hci_sec_filter.type_mask;
1954 uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
1955 uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
1956 }
1957
1958 {
1959 struct hci_filter *f = &hci_pi(sk)->filter;
1960
1961 f->type_mask = uf.type_mask;
1962 f->opcode = uf.opcode;
1963 *((u32 *) f->event_mask + 0) = uf.event_mask[0];
1964 *((u32 *) f->event_mask + 1) = uf.event_mask[1];
1965 }
1966 break;
1967
1968 default:
1969 err = -ENOPROTOOPT;
1970 break;
1971 }
1972
1973 done:
1974 release_sock(sk);
1975 return err;
1976 }
1977
1978 static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1979 char __user *optval, int __user *optlen)
1980 {
1981 struct hci_ufilter uf;
1982 struct sock *sk = sock->sk;
1983 int len, opt, err = 0;
1984
1985 BT_DBG("sk %p, opt %d", sk, optname);
1986
1987 if (level != SOL_HCI)
1988 return -ENOPROTOOPT;
1989
1990 if (get_user(len, optlen))
1991 return -EFAULT;
1992
1993 lock_sock(sk);
1994
1995 if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1996 err = -EBADFD;
1997 goto done;
1998 }
1999
2000 switch (optname) {
2001 case HCI_DATA_DIR:
2002 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
2003 opt = 1;
2004 else
2005 opt = 0;
2006
2007 if (put_user(opt, optval))
2008 err = -EFAULT;
2009 break;
2010
2011 case HCI_TIME_STAMP:
2012 if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
2013 opt = 1;
2014 else
2015 opt = 0;
2016
2017 if (put_user(opt, optval))
2018 err = -EFAULT;
2019 break;
2020
2021 case HCI_FILTER:
2022 {
2023 struct hci_filter *f = &hci_pi(sk)->filter;
2024
2025 memset(&uf, 0, sizeof(uf));
2026 uf.type_mask = f->type_mask;
2027 uf.opcode = f->opcode;
2028 uf.event_mask[0] = *((u32 *) f->event_mask + 0);
2029 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
2030 }
2031
2032 len = min_t(unsigned int, len, sizeof(uf));
2033 if (copy_to_user(optval, &uf, len))
2034 err = -EFAULT;
2035 break;
2036
2037 default:
2038 err = -ENOPROTOOPT;
2039 break;
2040 }
2041
2042 done:
2043 release_sock(sk);
2044 return err;
2045 }
2046
2047 static void hci_sock_destruct(struct sock *sk)
2048 {
2049 skb_queue_purge(&sk->sk_receive_queue);
2050 skb_queue_purge(&sk->sk_write_queue);
2051 }
2052
2053 static const struct proto_ops hci_sock_ops = {
2054 .family = PF_BLUETOOTH,
2055 .owner = THIS_MODULE,
2056 .release = hci_sock_release,
2057 .bind = hci_sock_bind,
2058 .getname = hci_sock_getname,
2059 .sendmsg = hci_sock_sendmsg,
2060 .recvmsg = hci_sock_recvmsg,
2061 .ioctl = hci_sock_ioctl,
2062 #ifdef CONFIG_COMPAT
2063 .compat_ioctl = hci_sock_compat_ioctl,
2064 #endif
2065 .poll = datagram_poll,
2066 .listen = sock_no_listen,
2067 .shutdown = sock_no_shutdown,
2068 .setsockopt = hci_sock_setsockopt,
2069 .getsockopt = hci_sock_getsockopt,
2070 .connect = sock_no_connect,
2071 .socketpair = sock_no_socketpair,
2072 .accept = sock_no_accept,
2073 .mmap = sock_no_mmap
2074 };
2075
2076 static struct proto hci_sk_proto = {
2077 .name = "HCI",
2078 .owner = THIS_MODULE,
2079 .obj_size = sizeof(struct hci_pinfo)
2080 };
2081
2082 static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
2083 int kern)
2084 {
2085 struct sock *sk;
2086
2087 BT_DBG("sock %p", sock);
2088
2089 if (sock->type != SOCK_RAW)
2090 return -ESOCKTNOSUPPORT;
2091
2092 sock->ops = &hci_sock_ops;
2093
2094 sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
2095 if (!sk)
2096 return -ENOMEM;
2097
2098 sock_init_data(sock, sk);
2099
2100 sock_reset_flag(sk, SOCK_ZAPPED);
2101
2102 sk->sk_protocol = protocol;
2103
2104 sock->state = SS_UNCONNECTED;
2105 sk->sk_state = BT_OPEN;
2106 sk->sk_destruct = hci_sock_destruct;
2107
2108 bt_sock_link(&hci_sk_list, sk);
2109 return 0;
2110 }
2111
2112 static const struct net_proto_family hci_sock_family_ops = {
2113 .family = PF_BLUETOOTH,
2114 .owner = THIS_MODULE,
2115 .create = hci_sock_create,
2116 };
2117
2118 int __init hci_sock_init(void)
2119 {
2120 int err;
2121
2122 BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));
2123
2124 err = proto_register(&hci_sk_proto, 0);
2125 if (err < 0)
2126 return err;
2127
2128 err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
2129 if (err < 0) {
2130 BT_ERR("HCI socket registration failed");
2131 goto error;
2132 }
2133
2134 err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
2135 if (err < 0) {
2136 BT_ERR("Failed to create HCI proc file");
2137 bt_sock_unregister(BTPROTO_HCI);
2138 goto error;
2139 }
2140
2141 BT_INFO("HCI socket layer initialized");
2142
2143 return 0;
2144
2145 error:
2146 proto_unregister(&hci_sk_proto);
2147 return err;
2148 }
2149
2150 void hci_sock_cleanup(void)
2151 {
2152 bt_procfs_cleanup(&init_net, "hci");
2153 bt_sock_unregister(BTPROTO_HCI);
2154 proto_unregister(&hci_sk_proto);
2155 }
2156