• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth address family and sockets. */
26 
27 #include <linux/module.h>
28 #include <linux/debugfs.h>
29 #include <linux/stringify.h>
30 #include <linux/sched/signal.h>
31 
32 #include <asm/ioctls.h>
33 
34 #include <net/bluetooth/bluetooth.h>
35 #include <linux/proc_fs.h>
36 
37 #include "leds.h"
38 #include "selftest.h"
39 
/* Bluetooth sockets */
#define BT_MAX_PROTO	8

/* Table of registered protocol handlers, indexed by BTPROTO_* value.
 * Protected by bt_proto_lock (readers: socket creation; writers:
 * bt_sock_register()/bt_sock_unregister()).
 */
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

/* Per-protocol lockdep classes and names for the socket lock (sk_lock),
 * used by bt_sock_reclassify_lock().  Order must match the BTPROTO_*
 * numbering and the array size must stay BT_MAX_PROTO.
 */
static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

/* Same as above but for the spinlock half (slock) of the socket lock. */
static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
68 
/* Assign the per-protocol lockdep class/name pair to @sk's socket lock so
 * lockdep can distinguish Bluetooth socket locks of different protocols.
 * Must only be called while reclassification is still permitted for the
 * socket (enforced by the BUG_ON below).
 */
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(!sock_allow_reclassification(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
				bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);
79 
bt_sock_register(int proto,const struct net_proto_family * ops)80 int bt_sock_register(int proto, const struct net_proto_family *ops)
81 {
82 	int err = 0;
83 
84 	if (proto < 0 || proto >= BT_MAX_PROTO)
85 		return -EINVAL;
86 
87 	write_lock(&bt_proto_lock);
88 
89 	if (bt_proto[proto])
90 		err = -EEXIST;
91 	else
92 		bt_proto[proto] = ops;
93 
94 	write_unlock(&bt_proto_lock);
95 
96 	return err;
97 }
98 EXPORT_SYMBOL(bt_sock_register);
99 
bt_sock_unregister(int proto)100 void bt_sock_unregister(int proto)
101 {
102 	if (proto < 0 || proto >= BT_MAX_PROTO)
103 		return;
104 
105 	write_lock(&bt_proto_lock);
106 	bt_proto[proto] = NULL;
107 	write_unlock(&bt_proto_lock);
108 }
109 EXPORT_SYMBOL(bt_sock_unregister);
110 
/* AF_BLUETOOTH socket() entry point: dispatch to the registered protocol
 * handler's ->create().  Attempts module autoload ("bt-proto-<n>") if the
 * slot is empty.  Bluetooth sockets are restricted to the initial network
 * namespace.
 *
 * Returns 0 on success, -EAFNOSUPPORT outside init_net, -EINVAL for a bad
 * protocol number, or -EPROTONOSUPPORT if no handler could be found.
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	/* Racy peek is fine: the authoritative check happens under the
	 * lock below; this just triggers autoload before we take it.
	 */
	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	/* Pin the handler's module across the ->create() call so it cannot
	 * be unloaded mid-creation.
	 */
	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}
140 
/* Add @sk to the protocol's global socket list @l (used e.g. for the
 * /proc listing).  Takes the list's writer lock.
 */
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);
148 
/* Remove @sk from the protocol's global socket list @l.  Counterpart of
 * bt_sock_link().
 */
void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);
156 
/* Queue child socket @sk on @parent's accept queue.
 *
 * Holds a reference on @sk for as long as it sits on the queue (dropped
 * in bt_accept_unlink()).  @bh selects the locking primitive: the
 * bh-safe spinlock variant when called from bottom-half context, the
 * sleeping lock_sock() otherwise.  The _nested/SINGLE_DEPTH_NESTING
 * annotations keep lockdep happy; presumably the caller already holds
 * the parent's lock -- NOTE(review): confirm against callers.
 */
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);

	if (bh)
		bh_lock_sock_nested(sk);
	else
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;

	if (bh)
		bh_unlock_sock(sk);
	else
		release_sock(sk);

	/* Account the new pending connection on the listening socket */
	sk_acceptq_added(parent);
}
EXPORT_SYMBOL(bt_accept_enqueue);
179 
/* Remove @sk from its parent's accept queue and drop the reference taken
 * by bt_accept_enqueue().
 *
 * Calling function must hold the sk lock.
 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	sk_acceptq_removed(bt_sk(sk)->parent);
	bt_sk(sk)->parent = NULL;
	sock_put(sk);	/* pairs with sock_hold() in bt_accept_enqueue() */
}
EXPORT_SYMBOL(bt_accept_unlink);
193 
/* Pop the first acceptable child socket off @parent's accept queue.
 *
 * Returns a socket that is BT_CONNECTED (or any child when @newsock is
 * NULL or defer-setup is enabled on the parent), grafting it onto
 * @newsock when one is supplied; returns NULL if nothing is ready.
 * Closed children encountered along the way are unlinked and skipped.
 */
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

restart:
	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		/* Prevent early freeing of sk due to unlink and sock_kill */
		sock_hold(sk);
		lock_sock(sk);

		/* Check sk has not already been unlinked via
		 * bt_accept_unlink() due to serialisation caused by sk locking
		 */
		if (!bt_sk(sk)->parent) {
			BT_DBG("sk %p, already unlinked", sk);
			release_sock(sk);
			sock_put(sk);

			/* Restart the loop as sk is no longer in the list
			 * and also avoid a potential infinite loop because
			 * list_for_each_entry_safe() is not thread safe.
			 */
			goto restart;
		}

		/* sk is safely in the parent list so reduce reference count */
		sock_put(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);
250 
/* Generic datagram-style recvmsg for Bluetooth sockets: dequeue one skb,
 * copy up to @len bytes into @msg (setting MSG_TRUNC when the datagram
 * is larger), and fill in timestamps plus any protocol-provided address
 * (skb_msg_name) or cmsg data (skb_put_cmsg).
 *
 * Returns the number of bytes copied (full skb length when MSG_TRUNC was
 * requested), 0 on shutdown, or a negative error.  MSG_OOB is not
 * supported.
 */
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		/* A shut-down receive side reads as EOF, not an error */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			err = 0;

		release_sock(sk);
		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		/* Datagram bigger than the buffer: truncate and flag it */
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		/* Let the protocol fill in the source address, if it can */
		if (msg->msg_name && bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);

		if (bt_sk(sk)->skb_put_cmsg)
			bt_sk(sk)->skb_put_cmsg(skb, msg, sk);
	}

	skb_free_datagram(sk, skb);

	release_sock(sk);

	/* With MSG_TRUNC the caller asked for the real datagram length */
	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);
307 
/* Sleep until data arrives on sk's receive queue, the socket errors out
 * or shuts down, a signal is pending, or @timeo expires.  Called with the
 * socket lock held; the lock is dropped around the actual sleep.
 *
 * Returns the remaining timeout.
 */
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}
336 
/* Stream-style recvmsg for Bluetooth sockets: copy up to @size bytes out
 * of the receive queue, blocking (subject to the socket's rcvtimeo) until
 * at least the rcvlowat target has been gathered.  Partially consumed
 * skbs are trimmed in place and requeued at the head so the leftover
 * bytes are delivered first on the next call; with MSG_PEEK nothing is
 * consumed.
 *
 * Returns bytes copied, or a negative error when nothing was copied.
 * MSG_OOB is not supported.
 */
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			/* Queue empty: stop if we have enough, report
			 * pending errors/shutdown, otherwise wait.
			 */
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			/* Copy fault: put the skb back untouched */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			/* Consume the copied bytes: pull from the linear
			 * head first, then walk the frag list.
			 */
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				/* Leftover bytes go back to the head of
				 * the queue for the next read.
				 */
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
441 
/* Poll helper for listening sockets: report readable (EPOLLIN) when the
 * accept queue holds a connected child, or -- with defer-setup enabled on
 * the parent -- one still in BT_CONNECT2.
 */
static inline __poll_t bt_accept_poll(struct sock *parent)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}
457 
/* poll() implementation shared by Bluetooth socket types.  Listening
 * sockets delegate to bt_accept_poll(); otherwise the mask reflects
 * pending errors, shutdown state, receive-queue data, and -- once the
 * socket is past its connecting states -- writability, honouring the
 * per-socket BT_SK_SUSPEND flow-control flag.
 */
__poll_t bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= EPOLLHUP;

	/* While connecting/configuring, never report writable */
	if (sk->sk_state == BT_CONNECT ||
			sk->sk_state == BT_CONNECT2 ||
			sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);
498 
/* Common ioctl handling for Bluetooth sockets:
 *   TIOCOUTQ - bytes of free space left in the send buffer
 *   TIOCINQ  - length of the first queued receive skb (0 if none)
 * Anything else returns -ENOIOCTLCMD so the caller can fall through to
 * protocol-specific handling.  Both queries are invalid on listeners.
 */
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		/* Peek under the queue lock so the skb can't be freed
		 * while we read its length.
		 */
		spin_lock(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		spin_unlock(&sk->sk_receive_queue.lock);

		err = put_user(amount, (int __user *)arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);
539 
/* Sleep until sk reaches @state, a signal arrives, a socket error is
 * raised, or @timeo runs out.  The socket lock is dropped around each
 * sleep and re-taken afterwards.
 *
 * Returns 0 on success, -EINPROGRESS on timeout, a signal errno, or the
 * pending socket error.
 *
 * This function expects the sk lock to be held when called.
 */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);
575 
/* Sleep until the BT_SK_SUSPEND flow-control flag clears, using the
 * socket's send timeout (non-blocking when O_NONBLOCK is set in @flags).
 * The socket lock is dropped around each sleep and re-taken afterwards.
 *
 * Returns 0 when the socket is ready, -EAGAIN on timeout, a signal
 * errno, or the pending socket error.
 *
 * This function expects the sk lock to be held when called.
 */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);
615 
616 #ifdef CONFIG_PROC_FS
/* seq_file ->start(): take the socket list's reader lock (held until
 * bt_seq_stop()) and position at entry *pos, with a START_TOKEN for the
 * header row.
 */
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}
625 
/* seq_file ->next(): advance to the following socket in the list. */
static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	return seq_hlist_next(v, &l->head, pos);
}
632 
/* seq_file ->stop(): release the reader lock taken in bt_seq_start(). */
static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_unlock(&l->lock);
}
640 
bt_seq_show(struct seq_file * seq,void * v)641 static int bt_seq_show(struct seq_file *seq, void *v)
642 {
643 	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));
644 
645 	if (v == SEQ_START_TOKEN) {
646 		seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Parent");
647 
648 		if (l->custom_seq_show) {
649 			seq_putc(seq, ' ');
650 			l->custom_seq_show(seq, v);
651 		}
652 
653 		seq_putc(seq, '\n');
654 	} else {
655 		struct sock *sk = sk_entry(v);
656 		struct bt_sock *bt = bt_sk(sk);
657 
658 		seq_printf(seq,
659 			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
660 			   sk,
661 			   refcount_read(&sk->sk_refcnt),
662 			   sk_rmem_alloc_get(sk),
663 			   sk_wmem_alloc_get(sk),
664 			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
665 			   sock_i_ino(sk),
666 			   bt->parent? sock_i_ino(bt->parent): 0LU);
667 
668 		if (l->custom_seq_show) {
669 			seq_putc(seq, ' ');
670 			l->custom_seq_show(seq, v);
671 		}
672 
673 		seq_putc(seq, '\n');
674 	}
675 	return 0;
676 }
677 
/* seq_file iterator for the per-protocol /proc socket listings. */
static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};
684 
/* Create /proc/net/<name> listing the sockets in @sk_list, with
 * @seq_show as the optional per-protocol extra-columns hook.
 *
 * Returns 0 on success or -ENOMEM if the proc entry could not be made.
 */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list))
		return -ENOMEM;
	return 0;
}
695 
/* Remove the /proc/net/<name> entry created by bt_procfs_init(). */
void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
700 #else
/* !CONFIG_PROC_FS stub: nothing to create, always succeeds. */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	return 0;
}
707 
/* !CONFIG_PROC_FS stub: nothing to remove. */
void bt_procfs_cleanup(struct net *net, const char *name)
{
}
711 #endif
712 EXPORT_SYMBOL(bt_procfs_init);
713 EXPORT_SYMBOL(bt_procfs_cleanup);
714 
/* PF_BLUETOOTH address-family registration: socket() calls funnel
 * through bt_sock_create().
 */
static const struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};
720 
/* Root of the bluetooth debugfs hierarchy, shared with other modules. */
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

/* Subsystem version string, e.g. "2.22", built from build-time macros */
#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
		__stringify(BT_SUBSYS_REVISION)
726 
/* Subsystem init: self-tests, debugfs root, LEDs, sysfs, the
 * PF_BLUETOOTH family, then the HCI/L2CAP/SCO/mgmt layers in order.
 * On any failure, unwind everything already set up via the goto chain
 * (in reverse order of initialization).
 */
static int __init bt_init(void)
{
	int err;

	/* bt_skb_cb must fit in the skb control block */
	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	bt_leds_init();

	err = bt_sysfs_init();
	if (err < 0)
		goto cleanup_led;

	err = sock_register(&bt_sock_family_ops);
	if (err)
		goto cleanup_sysfs;

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err)
		goto unregister_socket;

	err = l2cap_init();
	if (err)
		goto cleanup_socket;

	err = sco_init();
	if (err)
		goto cleanup_cap;

	err = mgmt_init();
	if (err)
		goto cleanup_sco;

	return 0;

cleanup_sco:
	sco_exit();
cleanup_cap:
	l2cap_exit();
cleanup_socket:
	hci_sock_cleanup();
unregister_socket:
	sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
	bt_sysfs_cleanup();
cleanup_led:
	bt_leds_cleanup();
	return err;
}
785 
/* Subsystem teardown: undo bt_init() in strict reverse order. */
static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	bt_leds_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}
804 
805 subsys_initcall(bt_init);
806 module_exit(bt_exit);
807 
808 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
809 MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
810 MODULE_VERSION(VERSION);
811 MODULE_LICENSE("GPL");
812 MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
813