• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth address family and sockets. */
26 
27 #include <linux/module.h>
28 #include <linux/debugfs.h>
29 #include <linux/stringify.h>
30 #include <linux/sched/signal.h>
31 
32 #include <asm/ioctls.h>
33 
34 #include <net/bluetooth/bluetooth.h>
35 #include <linux/proc_fs.h>
36 
37 #include "leds.h"
38 #include "selftest.h"
39 
/* Bluetooth sockets */
#define BT_MAX_PROTO	8
/* Registered protocol handlers, indexed by BTPROTO_* number. */
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
/* Serialises registration/unregistration against socket creation. */
static DEFINE_RWLOCK(bt_proto_lock);

/* Per-protocol lockdep classes and names for the socket lock (sk_lock),
 * so lockdep can tell e.g. an L2CAP socket lock apart from an RFCOMM one.
 */
static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

/* Same, for the lower-level socket spinlock (slock). */
static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};
68 
bt_sock_reclassify_lock(struct sock * sk,int proto)69 void bt_sock_reclassify_lock(struct sock *sk, int proto)
70 {
71 	BUG_ON(!sk);
72 	BUG_ON(!sock_allow_reclassification(sk));
73 
74 	sock_lock_init_class_and_name(sk,
75 			bt_slock_key_strings[proto], &bt_slock_key[proto],
76 				bt_key_strings[proto], &bt_lock_key[proto]);
77 }
78 EXPORT_SYMBOL(bt_sock_reclassify_lock);
79 
bt_sock_register(int proto,const struct net_proto_family * ops)80 int bt_sock_register(int proto, const struct net_proto_family *ops)
81 {
82 	int err = 0;
83 
84 	if (proto < 0 || proto >= BT_MAX_PROTO)
85 		return -EINVAL;
86 
87 	write_lock(&bt_proto_lock);
88 
89 	if (bt_proto[proto])
90 		err = -EEXIST;
91 	else
92 		bt_proto[proto] = ops;
93 
94 	write_unlock(&bt_proto_lock);
95 
96 	return err;
97 }
98 EXPORT_SYMBOL(bt_sock_register);
99 
bt_sock_unregister(int proto)100 void bt_sock_unregister(int proto)
101 {
102 	if (proto < 0 || proto >= BT_MAX_PROTO)
103 		return;
104 
105 	write_lock(&bt_proto_lock);
106 	bt_proto[proto] = NULL;
107 	write_unlock(&bt_proto_lock);
108 }
109 EXPORT_SYMBOL(bt_sock_unregister);
110 
/* PF_BLUETOOTH socket(2) backend: dispatch socket creation to the
 * registered protocol handler, autoloading the "bt-proto-<n>" module
 * first if no handler is present.  Returns 0 or a negative errno.
 */
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	/* Bluetooth sockets exist only in the initial network namespace. */
	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	/* Lockless peek: worst case we request an already-loaded module,
	 * or race with registration and fail with -EPROTONOSUPPORT below.
	 */
	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	/* Pin the handler module for the duration of ->create(). */
	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}
140 
bt_sock_link(struct bt_sock_list * l,struct sock * sk)141 void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
142 {
143 	write_lock(&l->lock);
144 	sk_add_node(sk, &l->head);
145 	write_unlock(&l->lock);
146 }
147 EXPORT_SYMBOL(bt_sock_link);
148 
bt_sock_unlink(struct bt_sock_list * l,struct sock * sk)149 void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
150 {
151 	write_lock(&l->lock);
152 	sk_del_node_init(sk);
153 	write_unlock(&l->lock);
154 }
155 EXPORT_SYMBOL(bt_sock_unlink);
156 
/* Queue child socket @sk on @parent's accept queue.  Takes an extra
 * reference on @sk that bt_accept_unlink() drops again.
 * @bh: true when called from bottom-half/atomic context, where the
 *      non-sleeping bh socket lock must be used instead of lock_sock().
 */
void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);

	if (bh)
		bh_lock_sock_nested(sk);
	else
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;

	if (bh)
		bh_unlock_sock(sk);
	else
		release_sock(sk);

	/* Count the pending connection against the parent's backlog. */
	sk_acceptq_added(parent);
}
EXPORT_SYMBOL(bt_accept_enqueue);
179 
/* Calling function must hold the sk lock.
 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
 */
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	sk_acceptq_removed(bt_sk(sk)->parent);
	bt_sk(sk)->parent = NULL;
	/* Drop the reference taken by bt_accept_enqueue(). */
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);
193 
/* Scan @parent's accept queue and dequeue the first child that is ready
 * (BT_CONNECTED, or any state when @newsock is NULL or deferred setup is
 * enabled on @parent).  Closed children found along the way are unlinked.
 * When @newsock is given the child is grafted onto it.
 * Returns the dequeued child socket, or NULL if none is ready.
 */
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct bt_sock *s, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

restart:
	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *)s;

		/* Prevent early freeing of sk due to unlink and sock_kill */
		sock_hold(sk);
		lock_sock(sk);

		/* Check sk has not already been unlinked via
		 * bt_accept_unlink() due to serialisation caused by sk locking
		 */
		if (!bt_sk(sk)->parent) {
			BT_DBG("sk %p, already unlinked", sk);
			release_sock(sk);
			sock_put(sk);

			/* Restart the loop as sk is no longer in the list
			 * and also avoid a potential infinite loop because
			 * list_for_each_entry_safe() is not thread safe.
			 */
			goto restart;
		}

		/* sk is safely in the parent list so reduce reference count */
		sock_put(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			bt_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);
250 
/* Generic datagram recvmsg for Bluetooth sockets: dequeue one skb, copy
 * at most @len bytes into @msg (setting MSG_TRUNC if the datagram was
 * larger), then run the protocol's optional address/cmsg hooks.
 * Returns bytes copied (the full skb length when MSG_TRUNC was passed
 * in @flags), 0 when the receive side is shut down and the queue is
 * empty, or a negative errno.  MSG_OOB is not supported.
 */
int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	size_t skblen;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		/* An empty queue after shutdown is EOF, not an error. */
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			err = 0;

		release_sock(sk);
		return err;
	}

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		/* Protocol-specific source address, if the caller asked
		 * for one and the protocol provides a hook.
		 */
		if (msg->msg_name && bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);

		if (bt_sk(sk)->skb_put_cmsg)
			bt_sk(sk)->skb_put_cmsg(skb, msg, sk);
	}

	skb_free_datagram(sk, skb);

	release_sock(sk);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);
307 
/* Sleep (interruptibly) until data is queued on @sk, an error or receive
 * shutdown is flagged, a signal is pending, or @timeo expires.  Called
 * with the socket lock held; the lock is dropped around the schedule and
 * re-taken before returning.  Returns the remaining timeout.
 */
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}
336 
/* Stream-style recvmsg for Bluetooth sockets: keep consuming skbs from
 * the receive queue until @size bytes are copied, the low-water target
 * is met, or an error/shutdown/signal/timeout stops us.  Partially
 * consumed skbs are pulled in place and re-queued at the head so the
 * remainder is delivered on the next call.  MSG_PEEK leaves data queued.
 * Returns bytes copied, or a negative errno when nothing was copied.
 */
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			/* Queue empty: stop if we already met the target,
			 * otherwise wait for more data (or fail/timeout).
			 */
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			/* Copy fault: put the skb back untouched. */
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			/* Consume @chunk bytes: first from the linear head,
			 * then walking the frag list for the remainder.
			 */
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			/* Anything left goes back to the head of the queue. */
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);
441 
bt_accept_poll(struct sock * parent)442 static inline __poll_t bt_accept_poll(struct sock *parent)
443 {
444 	struct bt_sock *s, *n;
445 	struct sock *sk;
446 
447 	list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) {
448 		sk = (struct sock *)s;
449 		if (sk->sk_state == BT_CONNECTED ||
450 		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
451 		     sk->sk_state == BT_CONNECT2))
452 			return EPOLLIN | EPOLLRDNORM;
453 	}
454 
455 	return 0;
456 }
457 
bt_sock_poll(struct file * file,struct socket * sock,poll_table * wait)458 __poll_t bt_sock_poll(struct file *file, struct socket *sock,
459 			  poll_table *wait)
460 {
461 	struct sock *sk = sock->sk;
462 	__poll_t mask = 0;
463 
464 	poll_wait(file, sk_sleep(sk), wait);
465 
466 	if (sk->sk_state == BT_LISTEN)
467 		return bt_accept_poll(sk);
468 
469 	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
470 		mask |= EPOLLERR |
471 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
472 
473 	if (sk->sk_shutdown & RCV_SHUTDOWN)
474 		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
475 
476 	if (sk->sk_shutdown == SHUTDOWN_MASK)
477 		mask |= EPOLLHUP;
478 
479 	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
480 		mask |= EPOLLIN | EPOLLRDNORM;
481 
482 	if (sk->sk_state == BT_CLOSED)
483 		mask |= EPOLLHUP;
484 
485 	if (sk->sk_state == BT_CONNECT ||
486 			sk->sk_state == BT_CONNECT2 ||
487 			sk->sk_state == BT_CONFIG)
488 		return mask;
489 
490 	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
491 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
492 	else
493 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
494 
495 	return mask;
496 }
497 EXPORT_SYMBOL(bt_sock_poll);
498 
bt_sock_ioctl(struct socket * sock,unsigned int cmd,unsigned long arg)499 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
500 {
501 	struct sock *sk = sock->sk;
502 	struct sk_buff *skb;
503 	long amount;
504 	int err;
505 
506 	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
507 
508 	switch (cmd) {
509 	case TIOCOUTQ:
510 		if (sk->sk_state == BT_LISTEN)
511 			return -EINVAL;
512 
513 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
514 		if (amount < 0)
515 			amount = 0;
516 		err = put_user(amount, (int __user *) arg);
517 		break;
518 
519 	case TIOCINQ:
520 		if (sk->sk_state == BT_LISTEN)
521 			return -EINVAL;
522 
523 		lock_sock(sk);
524 		skb = skb_peek(&sk->sk_receive_queue);
525 		amount = skb ? skb->len : 0;
526 		release_sock(sk);
527 		err = put_user(amount, (int __user *) arg);
528 		break;
529 
530 	default:
531 		err = -ENOIOCTLCMD;
532 		break;
533 	}
534 
535 	return err;
536 }
537 EXPORT_SYMBOL(bt_sock_ioctl);
538 
/* This function expects the sk lock to be held when called */
/* Wait (interruptibly) until @sk reaches @state or @timeo expires.
 * The socket lock is dropped around each schedule and re-taken after.
 * Returns 0 on success, -EINPROGRESS on timeout, a signal errno, or a
 * pending socket error.
 */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);
574 
/* This function expects the sk lock to be held when called */
/* Wait (interruptibly) until the BT_SK_SUSPEND flag is cleared on @sk,
 * using the send timeout (or non-blocking mode from @flags & O_NONBLOCK).
 * Returns 0 once ready, -EAGAIN on timeout, a signal errno, or a pending
 * socket error.
 */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);
614 
615 #ifdef CONFIG_PROC_FS
/* seq_file .start: take the socket list's read lock and position the
 * iterator at *pos (SEQ_START_TOKEN first, for the header row).
 */
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	read_lock(&l->lock);

	return seq_hlist_start_head(&l->head, *pos);
}
624 
/* seq_file .next: advance to the next socket in the list. */
static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));

	return seq_hlist_next(v, &l->head, pos);
}
631 
bt_seq_stop(struct seq_file * seq,void * v)632 static void bt_seq_stop(struct seq_file *seq, void *v)
633 	__releases(seq->private->l->lock)
634 {
635 	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));
636 
637 	read_unlock(&l->lock);
638 }
639 
bt_seq_show(struct seq_file * seq,void * v)640 static int bt_seq_show(struct seq_file *seq, void *v)
641 {
642 	struct bt_sock_list *l = PDE_DATA(file_inode(seq->file));
643 
644 	if (v == SEQ_START_TOKEN) {
645 		seq_puts(seq ,"sk               RefCnt Rmem   Wmem   User   Inode  Parent");
646 
647 		if (l->custom_seq_show) {
648 			seq_putc(seq, ' ');
649 			l->custom_seq_show(seq, v);
650 		}
651 
652 		seq_putc(seq, '\n');
653 	} else {
654 		struct sock *sk = sk_entry(v);
655 		struct bt_sock *bt = bt_sk(sk);
656 
657 		seq_printf(seq,
658 			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
659 			   sk,
660 			   refcount_read(&sk->sk_refcnt),
661 			   sk_rmem_alloc_get(sk),
662 			   sk_wmem_alloc_get(sk),
663 			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
664 			   sock_i_ino(sk),
665 			   bt->parent? sock_i_ino(bt->parent): 0LU);
666 
667 		if (l->custom_seq_show) {
668 			seq_putc(seq, ' ');
669 			l->custom_seq_show(seq, v);
670 		}
671 
672 		seq_putc(seq, '\n');
673 	}
674 	return 0;
675 }
676 
/* Iterator ops shared by every per-protocol /proc/net/bluetooth entry. */
static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};
683 
bt_procfs_init(struct net * net,const char * name,struct bt_sock_list * sk_list,int (* seq_show)(struct seq_file *,void *))684 int bt_procfs_init(struct net *net, const char *name,
685 		   struct bt_sock_list *sk_list,
686 		   int (* seq_show)(struct seq_file *, void *))
687 {
688 	sk_list->custom_seq_show = seq_show;
689 
690 	if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list))
691 		return -ENOMEM;
692 	return 0;
693 }
694 
bt_procfs_cleanup(struct net * net,const char * name)695 void bt_procfs_cleanup(struct net *net, const char *name)
696 {
697 	remove_proc_entry(name, net->proc_net);
698 }
699 #else
/* CONFIG_PROC_FS disabled: procfs setup is a successful no-op. */
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (* seq_show)(struct seq_file *, void *))
{
	return 0;
}
706 
/* CONFIG_PROC_FS disabled: nothing to tear down. */
void bt_procfs_cleanup(struct net *net, const char *name)
{
}
710 #endif
711 EXPORT_SYMBOL(bt_procfs_init);
712 EXPORT_SYMBOL(bt_procfs_cleanup);
713 
/* PF_BLUETOOTH address-family registration; bt_sock_create() fans out
 * to the per-protocol handlers in bt_proto[].
 */
static const struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};
719 
/* Root of the "bluetooth" debugfs directory, shared by the subsystem. */
struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

/* Subsystem version string, e.g. "2.22", built from the version macros. */
#define VERSION __stringify(BT_SUBSYS_VERSION) "." \
		__stringify(BT_SUBSYS_REVISION)
725 
/* Subsystem init: run selftests, create debugfs/sysfs/LED state, then
 * register the PF_BLUETOOTH family and bring up the core protocols
 * (HCI, L2CAP, SCO) and the management interface.  On failure the
 * goto ladder unwinds everything initialised so far, in reverse order.
 */
static int __init bt_init(void)
{
	int err;

	sock_skb_cb_check_size(sizeof(struct bt_skb_cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	bt_leds_init();

	err = bt_sysfs_init();
	if (err < 0)
		goto cleanup_led;

	err = sock_register(&bt_sock_family_ops);
	if (err)
		goto cleanup_sysfs;

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err)
		goto unregister_socket;

	err = l2cap_init();
	if (err)
		goto cleanup_socket;

	err = sco_init();
	if (err)
		goto cleanup_cap;

	err = mgmt_init();
	if (err)
		goto cleanup_sco;

	return 0;

cleanup_sco:
	sco_exit();
cleanup_cap:
	l2cap_exit();
cleanup_socket:
	hci_sock_cleanup();
unregister_socket:
	sock_unregister(PF_BLUETOOTH);
cleanup_sysfs:
	bt_sysfs_cleanup();
cleanup_led:
	bt_leds_cleanup();
	return err;
}
784 
/* Subsystem teardown: undo everything bt_init() set up, in the reverse
 * order of initialisation.
 */
static void __exit bt_exit(void)
{
	mgmt_exit();

	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	bt_leds_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}
803 
/* Initialise early (subsys level) so protocol modules can register. */
subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);
812