/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/stringify.h>
#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#include "leds.h"
#include "selftest.h"
/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

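/* Re-key a socket's lock and slock with the per-protocol lockdep classes
 * defined above, so lockdep can tell the socket locks of different Bluetooth
 * protocols apart (e.g. when one protocol is layered on top of another).
 */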
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
			bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

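/* Register the net_proto_family ops for one BTPROTO_* number so that
 * socket(AF_BLUETOOTH, ..., proto) can be dispatched to it.  Returns -EINVAL
 * for an out-of-range proto and -EEXIST if the slot is already taken.
 *
 * A protocol module would typically call this from its init path, roughly
 * like the following (illustrative sketch only, using RFCOMM-style names):
 *
 *	static const struct net_proto_family rfcomm_sock_family_ops = {
 *		.family	= PF_BLUETOOTH,
 *		.owner	= THIS_MODULE,
 *		.create	= rfcomm_sock_create,
 *	};
 *
 *	err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
 */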
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

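/* Drop the handler for one BTPROTO_* number.  Out-of-range values are
 * ignored; unregistering a protocol that was never registered is a no-op.
 */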
void bt_sock_unregister(int proto)
{
	if (proto < 0 || proto >= BT_MAX_PROTO)
		return;

	write_lock(&bt_proto_lock);
	bt_proto[proto] = NULL;
	write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);

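/* With CONFIG_PARANOID_NETWORK (as used on Android kernels), creating
 * Bluetooth sockets is restricted to root (euid 0); without it, no extra
 * restriction is applied.
 */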
#ifdef CONFIG_PARANOID_NETWORK
static inline int current_has_bt_admin(void)
{
	return !current_euid();
}

static inline int current_has_bt(void)
{
	return current_has_bt_admin();
}
#else
static inline int current_has_bt_admin(void)
{
	return 1;
}

static inline int current_has_bt(void)
{
	return 1;
}
#endif

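/* AF_BLUETOOTH socket(2) entry point: validate the protocol number, load the
 * protocol module on demand via the "bt-proto-%d" alias if needed, then hand
 * off to the registered per-protocol create() hook.
 */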
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
	    proto == BTPROTO_L2CAP) {
		if (!current_has_bt())
			return -EPERM;
	} else if (!current_has_bt_admin())
		return -EPERM;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

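/* Add a socket to / remove it from a protocol's global socket list (the list
 * backing the procfs seq_file output further below).
 */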
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

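/* Queue a new child socket on its listening parent's accept queue and take a
 * reference on it; the reference is dropped again in bt_accept_unlink().
 */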
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

/* Calling function must hold the sk lock.
 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

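/* Dequeue the first suitable child socket from the parent's accept queue.
 * With @newsock set (the accept() path), only a connected child is taken
 * unless the parent has BT_SK_DEFER_SETUP set, and the child is grafted onto
 * @newsock; with @newsock NULL (cleanup paths) any child is returned.
 * Returns NULL when nothing can be dequeued.
 */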
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

restart:
	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		/* Prevent early freeing of sk due to unlink and sock_kill */
		sock_hold(sk);
		lock_sock(sk);

		/* Check sk has not already been unlinked via
		 * bt_accept_unlink() due to serialisation caused by sk locking
		 */
		if (!bt_sk(sk)->parent) {
			BT_DBG("sk %p, already unlinked", sk);
			release_sock(sk);
			sock_put(sk);

			/* Restart the loop as sk is no longer in the list
			 * and also avoid a potential infinite loop because
			 * list_for_each_entry_safe() is not thread safe.
			 */
			goto restart;
		}

		/* sk is safely in the parent list so reduce reference count */
		sock_put(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

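/* Datagram-style recvmsg shared by several Bluetooth protocols: receive one
 * skb, copy at most @len bytes to userspace (setting MSG_TRUNC if the
 * message was larger) and fill in the sender address when the protocol
 * provides a skb_msg_name callback.
 */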
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

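/* Sleep (interruptibly) until data arrives on the receive queue, an error or
 * shutdown is signalled, or the timeout expires.  Called and returns with the
 * socket lock held; the lock is dropped around the actual sleep.
 */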
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

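/* Stream-style recvmsg: keep pulling data out of queued skbs until @size
 * bytes are copied, the SO_RCVLOWAT target is met, or the receive timeout
 * expires.  Partially consumed skbs are pushed back onto the receive queue.
 */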
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

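/* poll() for Bluetooth sockets.  Listening sockets report POLLIN when a
 * child on the accept queue is ready; otherwise the usual readable, writable,
 * error and hangup conditions are derived from the socket state.
 */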
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

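/* Common ioctl()s: TIOCOUTQ/TIOCINQ report free send buffer space and the
 * size of the next queued receive skb, SIOCGSTAMP/SIOCGSTAMPNS return packet
 * timestamps.  Anything else is left to the protocol (-ENOIOCTLCMD).
 */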
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);

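/* /proc/net/<name> support: a read-only seq_file that walks a protocol's
 * bt_sock_list under its read lock and prints one line per socket, plus an
 * optional protocol-specific column via custom_seq_show.
 */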
#ifdef CONFIG_PROC_FS
struct bt_seq_state {
	struct bt_sock_list *l;
};

static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE_DATA(inode);
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}

static const struct file_operations bt_fops = {
	.open = bt_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};

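/* Create the /proc/net/<name> entry backed by @sk_list.  @seq_show, if
 * non-NULL, is called for every row to append protocol-specific columns.
 *
 * Typical use from a protocol's init path (illustrative sketch only):
 *
 *	err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL);
 */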
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
		return -ENOMEM;
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
		__stringify(BT_SUBSYS_REVISION)

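/* Bring up the Bluetooth core: run the optional self-tests, create the
 * debugfs root, LED trigger and sysfs support, register the AF_BLUETOOTH
 * socket family, then initialise the HCI, L2CAP, SCO and management (mgmt)
 * layers, unwinding in reverse order on failure.
 */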
static int __init bt_init(void)
{
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	bt_leds_init();

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	err = mgmt_init();
	if (err < 0) {
		sco_exit();
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	bt_leds_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);