/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static DEFINE_IDA(sock_cookie_ida);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
	unsigned long     flags;
	__u32             cookie;
	char              comm[TASK_COMM_LEN];
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

u32 hci_sock_get_cookie(struct sock *sk)
{
	return hci_pi(sk)->cookie;
}

static bool hci_sock_gen_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (!id) {
		id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL);
		if (id < 0)
			id = 0xffffffff;

		hci_pi(sk)->cookie = id;
		get_task_comm(hci_pi(sk)->comm, current);
		return true;
	}

	return false;
}

static void hci_sock_free_cookie(struct sock *sk)
{
	int id = hci_pi(sk)->cookie;

	if (id) {
		hci_pi(sk)->cookie = 0xffffffff;
		ida_simple_remove(&sock_cookie_ida, id);
	}
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}

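/* Worked example (editorial note, not part of the upstream file):
 * hci_test_bit() treats @addr as an array of 32-bit words, so bit @nr
 * lives in word nr >> 5 at position nr & 31. For the Command Complete
 * event (code 0x0e), that is word 0, bit 14 of the event masks used
 * below.
 */
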
/* Security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};

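/* Reading the masks above (editorial note): event_mask[0] = 0x1000d9fe
 * has bit 14 set, so unprivileged sockets may keep Command Complete
 * (0x0e) in their filter; ocf_mask[OGF_INFO_PARAM][0] = 0x000002be has
 * bit 1 set, permitting Read Local Version Information (OCF 0x0001).
 * The event bits are clamped in hci_sock_setsockopt() and the command
 * bits enforced in hci_sock_sendmsg() for callers without CAP_NET_RAW.
 */
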
static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}

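/* Filtering example (editorial note): with type_mask allowing only
 * HCI_EVENT_PKT, an event mask with just bit 14 set and flt->opcode =
 * 0x0c03 (Reset), a socket sees exactly the Command Complete events
 * whose embedded opcode at offset 3 equals 0x0c03; every other packet
 * is dropped by the checks above.
 */
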
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
			    hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
			    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
			    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
				  int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
}

void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	read_lock(&hci_sk_list.lock);
	__hci_send_to_channel(channel, skb, flag, skip_sk);
	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
				 void *data, u16 data_len, ktime_t tstamp,
				 int flag, struct sock *skip_sk)
{
	struct sock *sk;
	__le16 index;

	if (hdev)
		index = cpu_to_le16(hdev->id);
	else
		index = cpu_to_le16(MGMT_INDEX_NONE);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct hci_mon_hdr *hdr;
		struct sk_buff *skb;

		if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
			continue;

		/* Ignore socket without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC);
		if (!skb)
			continue;

		put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
		put_unaligned_le16(event, skb_put(skb, 2));

		if (data)
			skb_put_data(skb, data, data_len);

		skb->tstamp = tstamp;

		hdr = skb_push(skb, HCI_MON_HDR_SIZE);
		hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT);
		hdr->index = index;
		hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

		__hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
				      HCI_SOCK_TRUSTED, NULL);
		kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_open(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	u16 format;
	u8 ver[3];
	u32 flags;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		format = 0x0000;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_USER:
		format = 0x0001;
		ver[0] = BT_SUBSYS_VERSION;
		put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1);
		break;
	case HCI_CHANNEL_CONTROL:
		format = 0x0002;
		mgmt_fill_version_info(ver);
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(14 + TASK_COMM_LEN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(format, skb_put(skb, 2));
	skb_put_data(skb, ver, sizeof(ver));
	put_unaligned_le32(flags, skb_put(skb, 4));
	skb_put_u8(skb, TASK_COMM_LEN);
	skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

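/* Payload layout of the CTRL_OPEN message built above (after the
 * monitor header): cookie (4) + format (2) + version (3) + flags (4) +
 * comm length (1) + comm (TASK_COMM_LEN) = 14 + TASK_COMM_LEN bytes,
 * which matches the bt_skb_alloc() size used in the function.
 */
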
static struct sk_buff *create_monitor_ctrl_close(struct sock *sk)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	/* No message needed when cookie is not present */
	if (!hci_pi(sk)->cookie)
		return NULL;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		break;
	default:
		/* No message for unsupported format */
		return NULL;
	}

	skb = bt_skb_alloc(4, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE);
	if (hci_pi(sk)->hdev)
		hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id);
	else
		hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index,
						   u16 opcode, u16 len,
						   const void *buf)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(6 + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4));
	put_unaligned_le16(opcode, skb_put(skb, 2));

	if (buf)
		skb_put_data(skb, buf, len);

	__net_timestamp(skb);

	hdr = skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void __printf(2, 3)
send_monitor_note(struct sock *sk, const char *fmt, ...)
{
	size_t len;
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	va_list args;

	va_start(args, fmt);
	len = vsnprintf(NULL, 0, fmt, args);
	va_end(args);

	skb = bt_skb_alloc(len + 1, GFP_ATOMIC);
	if (!skb)
		return;

	va_start(args, fmt);
	vsprintf(skb_put(skb, len), fmt, args);
	*(u8 *)skb_put(skb, 1) = 0;
	va_end(args);

	__net_timestamp(skb);

	hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE);
	hdr->index = cpu_to_le16(HCI_DEV_NONE);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	if (sock_queue_rcv_skb(sk, skb))
		kfree_skb(skb);
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

static void send_monitor_control_replay(struct sock *mon_sk)
{
	struct sock *sk;

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *skb;

		skb = create_monitor_ctrl_open(sk);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(mon_sk, skb))
			kfree_skb(skb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt  = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event  = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

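/* Usage sketch (editorial note; a hypothetical caller modelled on the
 * registration done by net/bluetooth/mgmt.c, with the handler table
 * name assumed for illustration):
 *
 *	static struct hci_mgmt_chan chan = {
 *		.channel	= HCI_CHANNEL_CONTROL,
 *		.handler_count	= ARRAY_SIZE(mgmt_handlers),
 *		.handlers	= mgmt_handlers,
 *		.hdev_init	= mgmt_init_hdev,
 *	};
 *
 *	err = hci_mgmt_chan_register(&chan);
 *
 * Registration fails with -EALREADY if the channel number is taken.
 */
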
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	struct sk_buff *skb;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_MONITOR:
		atomic_dec(&monitor_promisc);
		break;
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_CONTROL:
		/* Send event to monitor */
		skb = create_monitor_ctrl_close(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		hci_sock_free_cookie(sk);
		break;
	}

	bt_sock_unlink(&hci_sk_list, sk);

	hdev = hci_pi(sk)->hdev;
	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_PRIMARY)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *)arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *)arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *)arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	/* When calling an ioctl on an unbound raw socket, ensure that
	 * the monitor gets informed. Ensure that the resulting event is
	 * only sent once by checking if the cookie exists or not. The
	 * socket cookie will only ever be generated once for the
	 * lifetime of a given socket.
	 */
	if (hci_sock_gen_cookie(sk)) {
		struct sk_buff *skb;

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

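/* Userspace sketch (editorial note, not part of this file): bringing up
 * hci0 through the raw-channel ioctl path above. Assumes the usual
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h> definitions.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	if (ioctl(fd, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 *
 * HCIDEVUP requires CAP_NET_ADMIN, matching the capable() check above.
 */
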
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	struct sk_buff *skb;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * an ioctl has already been issued against an unbound
			 * socket and with that triggered an open notification.
			 * Send a close notification first to allow the state
			 * transition to bound.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		if (!hci_sock_gen_cookie(sk)) {
			/* In the case when a cookie has already been assigned,
			 * this socket will transition from a raw socket into
			 * a user channel socket. For a clean transition, send
			 * the close notification first.
			 */
			skb = create_monitor_ctrl_close(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}
		}

		/* The user channel is restricted to CAP_NET_ADMIN
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->hdev = hdev;

		/* Send event to monitor */
		skb = create_monitor_ctrl_open(sk);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}

		atomic_inc(&hdev->promisc);
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_note(sk, "Linux version %s (%s)",
				  init_utsname()->release,
				  init_utsname()->machine);
		send_monitor_note(sk, "Bluetooth subsystem version %u.%u",
				  BT_SUBSYS_VERSION, BT_SUBSYS_REVISION);
		send_monitor_replay(sk);
		send_monitor_control_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	case HCI_CHANNEL_LOGGING:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hci_pi(sk)->channel = haddr.hci_channel;
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		hci_pi(sk)->channel = haddr.hci_channel;

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) {
			if (!hci_sock_gen_cookie(sk)) {
				/* In the case when a cookie has already been
				 * assigned, this socket will transition from
				 * a raw socket into a control socket. To
				 * allow for a clean transition, send the
				 * close notification first.
				 */
				skb = create_monitor_ctrl_close(sk);
				if (skb) {
					hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
							    HCI_SOCK_TRUSTED, NULL);
					kfree_skb(skb);
				}
			}

			/* Send event to monitor */
			skb = create_monitor_ctrl_open(sk);
			if (skb) {
				hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
						    HCI_SOCK_TRUSTED, NULL);
				kfree_skb(skb);
			}

			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
		}
		break;
	}

	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}

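/* Userspace sketch (editorial note): binding a raw HCI socket to hci0,
 * the path handled by the HCI_CHANNEL_RAW case above. Assumes the usual
 * struct sockaddr_hci definition from <bluetooth/hci.h>.
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
 *		perror("bind");
 */
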
static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev    = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;
	err = sizeof(*haddr);

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct old_timeval32 ctv;
#endif
		struct __kernel_old_timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	unsigned int skblen;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	skblen = skb->len;
	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	if (flags & MSG_TRUNC)
		copied = skblen;

	return err ? : copied;
}

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (chan->channel == HCI_CHANNEL_CONTROL) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_ctrl_command(sk, index, opcode, len,
						  buf + sizeof(*hdr));
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

static int hci_logging_frame(struct sock *sk, struct msghdr *msg, int len)
{
	struct hci_mon_hdr *hdr;
	struct sk_buff *skb;
	struct hci_dev *hdev;
	u16 index;
	int err;

	/* The logging frame consists at minimum of the standard header,
	 * the priority byte, the ident length byte and at least one NUL
	 * string terminator byte. Anything shorter is an invalid packet.
	 */
	if (len < sizeof(*hdr) + 3)
		return -EINVAL;

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hdr = (void *)skb->data;

	if (__le16_to_cpu(hdr->len) != len - sizeof(*hdr)) {
		err = -EINVAL;
		goto drop;
	}

	if (__le16_to_cpu(hdr->opcode) == 0x0000) {
		__u8 priority = skb->data[sizeof(*hdr)];
		__u8 ident_len = skb->data[sizeof(*hdr) + 1];

		/* Only the priorities 0-7 are valid and with that any other
		 * value results in an invalid packet.
		 *
		 * The priority byte is followed by an ident length byte and
		 * the NUL terminated ident string. Check that the ident
		 * length is not overflowing the packet and also that the
		 * ident string itself is NUL terminated. In case the ident
		 * length is zero, the length value actually doubles as NUL
		 * terminator identifier.
		 *
		 * The message follows the ident string (if present) and
		 * must be NUL terminated. Otherwise it is not a valid packet.
		 */
		if (priority > 7 || skb->data[len - 1] != 0x00 ||
		    ident_len > len - sizeof(*hdr) - 3 ||
		    skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) {
			err = -EINVAL;
			goto drop;
		}
	} else {
		err = -EINVAL;
		goto drop;
	}

	index = __le16_to_cpu(hdr->index);

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = -ENODEV;
			goto drop;
		}
	} else {
		hdev = NULL;
	}

	hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL);
	err = len;

	if (hdev)
		hci_dev_put(hdev);

drop:
	kfree_skb(skb);
	return err;
}

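/* Wire format of a minimal valid logging frame per the checks above
 * (editorial worked example): an hci_mon_hdr with opcode 0x0000, the
 * target index and len = 4, followed by priority (e.g. 0x06), an ident
 * length of 0x00 (no ident; the length byte then doubles as the NUL
 * terminator) and a NUL-terminated message such as "x\0". Any other
 * opcode, a priority above 7 or a missing NUL is rejected with -EINVAL.
 */
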
static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	case HCI_CHANNEL_LOGGING:
		err = hci_logging_frame(sk, msg, len);
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	hci_skb_pkt_type(skb) = skb->data[0];
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT &&
		    hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		/* Since the opcode has already been extracted here, store
		 * a copy of the value for later use by the drivers.
		 */
		hci_skb_opcode(skb) = opcode;

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
		    hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}

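/* Userspace sketch (editorial note): sending HCI_Reset on a bound raw
 * socket. The first byte is the packet type consumed by skb_pull()
 * above; the remaining bytes are the little-endian opcode 0x0c03 and a
 * zero parameter length.
 *
 *	static const unsigned char cmd[] = {
 *		HCI_COMMAND_PKT, 0x03, 0x0c, 0x00
 *	};
 *	if (write(fd, cmd, sizeof(cmd)) < 0)
 *		perror("write");
 *
 * Without CAP_NET_RAW the command must also pass hci_sec_filter.
 */
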
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode    = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

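/* Userspace sketch (editorial note): installing a filter through the
 * HCI_FILTER option handled above, using the struct hci_filter layout
 * from <bluetooth/hci.h> (which mirrors struct hci_ufilter here).
 *
 *	struct hci_filter flt = {
 *		.type_mask  = 1 << HCI_EVENT_PKT,
 *		.event_mask = { 1 << EVT_CMD_COMPLETE, 0 },
 *	};
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * For callers without CAP_NET_RAW the kernel silently clamps the masks
 * to hci_sec_filter, as done above.
 */
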
static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (level != SOL_HCI)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode    = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}