/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);
/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)
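/* The cast in hci_pi() is valid because struct hci_pinfo embeds its
 * struct bt_sock (which in turn starts with a struct sock) as the first
 * member, so a struct sock pointer also points at the hci_pinfo.
 */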

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

static struct hci_dev *hci_hdev_from_sock(struct sock *sk)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return ERR_PTR(-EBADFD);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return ERR_PTR(-EPIPE);
	return hdev;
}

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

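/* Socket cookies identify a socket instance in monitor control traces.
 * A cookie of 0 means "not assigned yet", which is why the IDA
 * allocation below starts at 1. A cookie is generated at most once
 * per socket lifetime.
 */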
static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

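/* Test bit nr in an array of 32-bit words: word nr >> 5, bit nr & 31.
 * For example, nr = 0x0e (HCI_EV_CMD_COMPLETE) selects word 0, bit 14.
 */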
static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

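/* Whitelist of packet types, events and commands that raw sockets
 * without CAP_NET_RAW may see or send. Each mask is indexed with
 * hci_test_bit(); e.g. event_mask[0] = 0x1000d9fe has bit 14 set, so
 * HCI_EV_CMD_COMPLETE (0x0e) passes the filter.
 */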
static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

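/* Userspace sketch (not kernel code): the per-socket filter consulted
 * below is installed with the HCI_FILTER socket option, e.g. using the
 * libbluetooth helpers:
 *
 *	struct hci_filter flt;
 *
 *	hci_filter_clear(&flt);
 *	hci_filter_set_ptype(HCI_EVENT_PKT, &flt);
 *	hci_filter_set_event(EVT_CMD_COMPLETE, &flt);
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 */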
static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
			       strnlen(hdev->name, sizeof(ni->name)), '\0');

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

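/* Build an HCI_MON_CTRL_OPEN notification describing this socket. The
 * payload layout is: cookie (__le32), format (__le16), version (3 bytes),
 * flags (__le32), comm length (__u8) and the comm string itself.
 */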
static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
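/* These events never come from the controller; they are synthesized as
 * HCI_EV_STACK_INTERNAL packets, flagged as incoming, and fed through
 * hci_send_to_sock() so that interested raw sockets can observe
 * stack-level state changes such as device registration.
 */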
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Wake up sockets using this dead device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			if (hci_pi(sk)->hdev == hdev) {
				sk->sk_err = EPIPE;
				sk->sk_state_change(sk);
			}
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);
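
/* Registration sketch (abridged from net/bluetooth/mgmt.c): this is
 * roughly how the management core hooks up HCI_CHANNEL_CONTROL; the
 * handler table shown is illustrative.
 *
 *	static struct hci_mgmt_chan chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(mgmt_handlers),
 *		.handlers	= mgmt_handlers,
 *		.hdev_init	= mgmt_init_hdev,
 *	};
 *
 *	err = hci_mgmt_chan_register(&chan);
 */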

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_hdev_from_sock(sk);

	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	/* Make sure the cmd is valid before doing anything */
	switch (cmd) {
	case HCIGETDEVLIST:
	case HCIGETDEVINFO:
	case HCIGETCONNLIST:
	case HCIDEVUP:
	case HCIDEVDOWN:
	case HCIDEVRESET:
	case HCIDEVRESTAT:
	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
	case HCIINQUIRY:
	case HCISETRAW:
	case HCIGETCONNINFO:
	case HCIGETAUTHINFO:
	case HCIBLOCKADDR:
	case HCIUNBLOCKADDR:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, then ensure
	 * that the monitor gets informed. Ensure that the resulting event
	 * is only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the lifetime
	 * of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		/* Perform careful checks before setting the HCI_SOCK_TRUSTED
		 * flag. Make sure that not only the current task but also
		 * the socket opener has the required capability, since
		 * privileged programs can be tricked into making ioctl calls
		 * on HCI sockets, and the socket should not be marked as
		 * trusted simply because the ioctl caller is privileged.
		 */
		if (sk_capable(sk, CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

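/* Userspace sketch (not kernel code): a monitor client such as btmon
 * binds to HCI_CHANNEL_MONITOR roughly like this (requires CAP_NET_RAW):
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_MONITOR,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *
 *	if (fd >= 0 && bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0)
 *		... read monitor frames with recv() ...
 */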
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	/* Allow detaching from dead device and attaching to alive device, if
	 * the caller wants to re-bind (instead of close) this socket in
	 * response to hci_sock_dev_event(HCI_DEV_UNREG) notification.
	 */
	hdev = hci_pi(sk)->hdev;
	if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		hci_pi(sk)->hdev = NULL;
		sk->sk_state = BT_OPEN;
		hci_dev_put(hdev);
	}
	hdev = NULL;

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against an unbound
			 * socket and with that triggered an open notification.
			 * Send a close notification first to allow the state
			 * transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

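/* Attach ancillary data for raw sockets: packet direction (HCI_CMSG_DIR)
 * and receive timestamp (HCI_CMSG_TSTAMP), enabled per socket via the
 * HCI_DATA_DIR and HCI_TIME_STAMP socket options and consumed by
 * userspace with CMSG_FIRSTHDR()/CMSG_DATA() on the received msghdr.
 */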
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

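/* Expected layout of a logging frame, as validated below:
 *
 *	struct hci_mon_hdr hdr;    opcode 0x0000, index, payload length
 *	__u8 priority;             syslog-style severity, 0-7
 *	__u8 ident_len;            length of ident including its NUL, 0 if none
 *	char ident[ident_len];     NUL terminated identifier (optional)
 *	char message[];            NUL terminated message text
 */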
static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one string
	 * terminator NUL byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_hdev_from_sock(sk);
	if (IS_ERR(hdev)) {
		err = PTR_ERR(hdev);
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

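		/* Example: opcode 0x0c03 (HCI_Reset) splits into
		 * ogf = 0x0c03 >> 10 = 0x03 (OGF_HOST_CTL) and
		 * ocf = 0x0c03 & 0x03ff = 0x0003, which is checked
		 * against hci_sec_filter.ocf_mask[0x03] below.
		 */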
		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static void hci_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;
	sk->sk_destruct = hci_sock_destruct;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}