/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

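/* Count of sockets bound to HCI_CHANNEL_MONITOR; lets the packet paths
 * skip monitor delivery entirely when nobody is listening.
 */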
static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
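/* The cast in hci_pi() is valid because struct hci_pinfo embeds
 * struct bt_sock (and therefore struct sock) as its first member.
 */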

struct hci_pinfo {
	struct bt_sock		bt;
	struct hci_dev		*hdev;
	struct hci_filter	filter;
	__u32			cmsg_mask;
	unsigned short		channel;
};

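/* Like test_bit(), but on an array of fixed-width __u32 words, so the
 * layout matches the masks in struct hci_filter and hci_sec_filter
 * regardless of BITS_PER_LONG: nr >> 5 selects the word, nr & 31 the bit.
 */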
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF	5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

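/* Whitelist of packet types, events and commands (per-OGF OCF bitmaps)
 * that raw sockets without CAP_NET_RAW may receive and send. Bit N of a
 * mask word corresponds to event or OCF number N, indexed via
 * hci_test_bit() above.
 */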
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

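/* Returns true if the socket's filter rejects @skb, i.e. the frame must
 * not be delivered to @sk.
 */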
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
		flt_type = 0;
	else
		flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

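		/* The private copy is created once per frame; every further
		 * matching socket gets a cheap clone that shares its data.
		 */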
		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to control socket */
void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	default:
		return;
	}

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		if (!skb_copy) {
			struct hci_mon_hdr *hdr;

			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE,
						      GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put header before the data */
			hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
			hdr->opcode = opcode;
			hdr->index = cpu_to_le16(hdev->id);
			hdr->len = cpu_to_le16(skb->len);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

static void send_monitor_event(struct sk_buff *skb)
{
	struct sock *sk;

	BT_DBG("len %d", skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	struct hci_ev_si_device ev;

	BT_DBG("hdev %s event %d", hdev->name, event);

	/* Send event to monitor */
	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, event);
		if (skb) {
			send_monitor_event(skb);
			kfree_skb(skb);
		}
	}

	/* Send event to sockets */
	ev.event = event;
	ev.dev_id = hdev->id;
	hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			mgmt_index_added(hdev);
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			hci_dev_close(hdev->id);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

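/* Illustrative sketch only: userspace typically reaches this bind handler
 * with something like the following (device index 0 and the raw channel
 * are arbitrary example choices):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 */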
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_UP, &hdev->flags) ||
		    test_bit(HCI_INIT, &hdev->flags) ||
		    test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags)) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
			mgmt_index_added(hdev);
			hci_dev_put(hdev);
			goto done;
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_CONTROL:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		err = -EINVAL;
		goto done;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}

static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			    struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

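	/* The 4-byte floor corresponds to the shortest useful frame: the
	 * 1-byte packet-type prefix plus a parameterless HCI command
	 * header (2-byte opcode, 1-byte parameter length).
	 */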
	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_CONTROL:
		err = mgmt_control(sk, msg, len);
		goto done;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		err = -EINVAL;
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However, check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

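		/* OGF 0x3f is the vendor-specific command group; such
		 * commands bypass the command queue and request framework
		 * and go out through the raw queue instead.
		 */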
		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->req.start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

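/* Illustrative sketch only: with BlueZ's userspace helpers, a raw socket
 * would typically install a filter before reading events, for example
 * (fd and the chosen event are example values):
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * For sockets without CAP_NET_RAW, the handler below silently clamps the
 * requested masks to hci_sec_filter.
 */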
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}