/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u8              cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
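
/* Worked example for hci_test_bit(): nr = 45 gives (nr >> 5) = 1, selecting
 * the second 32-bit word of the mask, and (nr & 31) = 13, selecting bit 13
 * within that word. The helper operates on __u32 words rather than native
 * longs so that it matches the __u32 mask arrays shared with userspace in
 * struct hci_ufilter.
 */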

/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
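
/* Worked example of the command check done in hci_sock_sendmsg(): the HCI
 * Inquiry command has opcode 0x0401, i.e. OGF 0x01 and OCF 0x0001 (assuming
 * the usual opcode = (OGF << 10) | OCF encoding behind hci_opcode_ogf() and
 * hci_opcode_ocf()). The lookup hci_test_bit(1, &ocf_mask[1]) then tests
 * bit 1 of 0xbe000006, which is set, so unprivileged raw sockets may issue
 * Inquiry without CAP_NET_RAW.
 */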

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
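
/* Illustrative userspace sketch (not part of this file) of installing the
 * filter that is_filtered_packet() evaluates; a true return above means the
 * frame is dropped for that socket. This assumes the hci_filter_* helper
 * macros from BlueZ's hci_lib.h:
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	hci_filter_set_event(EVT_CMD_STATUS, &flt);
 *	if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0)
 *		perror("setsockopt(HCI_FILTER)");
 */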

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_ISODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;
		fallthrough;

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}
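
/* Payload layout of the HCI_MON_CTRL_OPEN notification built above, as
 * produced by the skb_put sequence (integers little endian), which is also
 * where the 14 + TASK_COMM_LEN allocation size comes from:
 *
 *	cookie (4) | format (2) | version (3) | flags (4) |
 *	comm_len (1, always TASK_COMM_LEN) | comm (TASK_COMM_LEN)
 */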

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
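
/* Once hci_send_to_sock() prepends the packet type byte, the synthetic
 * frame built by hci_si_event() looks like a regular HCI event:
 *
 *	HCI_EVENT_PKT | evt = HCI_EV_STACK_INTERNAL | plen | type | data...
 *
 * so raw sockets can receive stack-internal notifications through the same
 * filter machinery that handles controller events.
 */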

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);
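
/* Illustrative sketch of a caller, modeled loosely on how mgmt.c wires up
 * HCI_CHANNEL_CONTROL (the handler table names here are hypothetical):
 *
 *	static struct hci_mgmt_chan example_chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(example_handlers),
 *		.handlers	= example_handlers,
 *		.hdev_init	= example_hdev_init,
 *	};
 *
 *	err = hci_mgmt_chan_register(&example_chan);
 *
 * Registration fails with -EINVAL for channels below HCI_CHANNEL_CONTROL
 * and with -EALREADY if the channel number is already taken.
 */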

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_reject_list_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}
	/* When calling an ioctl on an unbound raw socket, ensure that
	 * the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

#ifdef CONFIG_COMPAT
static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
		return hci_sock_ioctl(sock, cmd, arg);
	}

	return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against an unbound
			 * socket and with that triggered an open notification.
			 * Send a close notification first to allow the state
			 * transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of
		 * these events will be disabled, but that is then
		 * intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u8 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) {
		no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
		if (no_hdev != !hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one NUL
	 * string terminator byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}
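
/* Shape of a valid logging frame, as implied by the checks above (header
 * fields little endian; the opcode must be 0x0000 on input and is rewritten
 * to HCI_MON_USER_LOGGING before forwarding):
 *
 *	struct hci_mon_hdr | priority (0-7) | ident_len |
 *	ident string + NUL (if ident_len > 0) | message + NUL
 *
 * With ident_len == 0 the ident string is absent and the length byte itself
 * is the NUL that the data[sizeof(*hdr) + ident_len + 1] check lands on.
 */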

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
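
/* Illustrative userspace sketch (not part of this file): sending an Inquiry
 * command on a raw HCI socket exercises the HCI_COMMAND_PKT path above. The
 * first byte of the write is consumed as the packet type, the next two are
 * the little-endian opcode:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct sockaddr_hci a = { .hci_family = AF_BLUETOOTH, .hci_dev = 0 };
 *
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 *	// 0x01 = HCI_COMMAND_PKT, opcode 0x0401 (OGF 0x01/OCF 0x0001:
 *	// Inquiry), plen 5, GIAC LAP 0x9e8b33, length 8 * 1.28s, no limit
 *	unsigned char cmd[] = { 0x01, 0x01, 0x04, 0x05,
 *				0x33, 0x8b, 0x9e, 0x08, 0x00 };
 *	write(fd, cmd, sizeof(cmd));
 */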

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_sockptr(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hci_sock_compat_ioctl,
#endif
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}