1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
5  * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
6  * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
7  * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/errno.h>
15 #include <linux/types.h>
16 #include <linux/socket.h>
17 #include <linux/in.h>
18 #include <linux/slab.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/spinlock.h>
22 #include <linux/timer.h>
23 #include <linux/string.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/stat.h>
27 #include <net/net_namespace.h>
28 #include <net/ax25.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_arp.h>
32 #include <linux/skbuff.h>
33 #include <net/sock.h>
34 #include <linux/uaccess.h>
35 #include <linux/fcntl.h>
36 #include <linux/termios.h>
37 #include <linux/mm.h>
38 #include <linux/interrupt.h>
39 #include <linux/notifier.h>
40 #include <net/rose.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <net/tcp_states.h>
44 #include <net/ip.h>
45 #include <net/arp.h>
46 
47 static int rose_ndevs = 10;
48 
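/*
 *	Protocol-wide timer and limit defaults.  They are initialised from the
 *	ROSE_DEFAULT_* constants and can be tuned at run time through the
 *	sysctl table registered from rose_proto_init().
 */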
49 int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
50 int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
51 int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
52 int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
53 int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
54 int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
55 int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
56 int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
57 int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
58 int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
59 
60 static HLIST_HEAD(rose_list);
61 static DEFINE_SPINLOCK(rose_list_lock);
62 
63 static const struct proto_ops rose_proto_ops;
64 
65 ax25_address rose_callsign;
66 
67 /*
68  * ROSE network devices are virtual network devices encapsulating ROSE
69  * frames into AX.25 which will be sent through an AX.25 device, so form a
70  * special "super class" of normal net devices; split their locks off into a
71  * separate class since they always nest.
72  */
73 static struct lock_class_key rose_netdev_xmit_lock_key;
74 static struct lock_class_key rose_netdev_addr_lock_key;
75 
76 static void rose_set_lockdep_one(struct net_device *dev,
77 				 struct netdev_queue *txq,
78 				 void *_unused)
79 {
80 	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
81 }
82 
83 static void rose_set_lockdep_key(struct net_device *dev)
84 {
85 	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
86 	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
87 }
88 
89 /*
90  *	Convert a ROSE address into text.
91  */
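/*
 *	A ROSE address is five BCD-packed octets, i.e. ten digits.  The
 *	all-zero address prints as "*"; otherwise the bytes 0x23 0x45 0x67
 *	0x89 0x01, for example, print as "2345678901".
 */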
92 char *rose2asc(char *buf, const rose_address *addr)
93 {
94 	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
95 	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
96 	    addr->rose_addr[4] == 0x00) {
97 		strcpy(buf, "*");
98 	} else {
99 		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
100 						addr->rose_addr[1] & 0xFF,
101 						addr->rose_addr[2] & 0xFF,
102 						addr->rose_addr[3] & 0xFF,
103 						addr->rose_addr[4] & 0xFF);
104 	}
105 
106 	return buf;
107 }
108 
109 /*
110  *	Compare two ROSE addresses, 0 == equal.
111  */
112 int rosecmp(rose_address *addr1, rose_address *addr2)
113 {
114 	int i;
115 
116 	for (i = 0; i < 5; i++)
117 		if (addr1->rose_addr[i] != addr2->rose_addr[i])
118 			return 1;
119 
120 	return 0;
121 }
122 
123 /*
124  *	Compare two ROSE addresses for only mask digits, 0 == equal.
125  */
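/*
 *	"mask" counts leading address digits: digit i lives in byte i / 2,
 *	even digits in the high nibble and odd digits in the low nibble.
 */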
126 int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
127 {
128 	unsigned int i, j;
129 
130 	if (mask > 10)
131 		return 1;
132 
133 	for (i = 0; i < mask; i++) {
134 		j = i / 2;
135 
136 		if ((i % 2) != 0) {
137 			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
138 				return 1;
139 		} else {
140 			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
141 				return 1;
142 		}
143 	}
144 
145 	return 0;
146 }
147 
148 /*
149  *	Socket removal during an interrupt is now safe.
150  */
151 static void rose_remove_socket(struct sock *sk)
152 {
153 	spin_lock_bh(&rose_list_lock);
154 	sk_del_node_init(sk);
155 	spin_unlock_bh(&rose_list_lock);
156 }
157 
158 /*
159  *	Kill all bound sockets on a broken link layer connection to a
160  *	particular neighbour.
161  */
162 void rose_kill_by_neigh(struct rose_neigh *neigh)
163 {
164 	struct sock *s;
165 
166 	spin_lock_bh(&rose_list_lock);
167 	sk_for_each(s, &rose_list) {
168 		struct rose_sock *rose = rose_sk(s);
169 
170 		if (rose->neighbour == neigh) {
171 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
172 			rose->neighbour->use--;
173 			rose->neighbour = NULL;
174 		}
175 	}
176 	spin_unlock_bh(&rose_list_lock);
177 }
178 
179 /*
180  *	Kill all bound sockets on a dropped device.
181  */
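/*
 *	Matching sockets are collected in batches of up to 16 under
 *	rose_list_lock and then disconnected with the lock dropped, since
 *	lock_sock() may sleep; a full batch forces a rescan of the list.
 */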
182 static void rose_kill_by_device(struct net_device *dev)
183 {
184 	struct sock *sk, *array[16];
185 	struct rose_sock *rose;
186 	bool rescan;
187 	int i, cnt;
188 
189 start:
190 	rescan = false;
191 	cnt = 0;
192 	spin_lock_bh(&rose_list_lock);
193 	sk_for_each(sk, &rose_list) {
194 		rose = rose_sk(sk);
195 		if (rose->device == dev) {
196 			if (cnt == ARRAY_SIZE(array)) {
197 				rescan = true;
198 				break;
199 			}
200 			sock_hold(sk);
201 			array[cnt++] = sk;
202 		}
203 	}
204 	spin_unlock_bh(&rose_list_lock);
205 
206 	for (i = 0; i < cnt; i++) {
207 		sk = array[i];
208 		rose = rose_sk(sk);
209 		lock_sock(sk);
210 		spin_lock_bh(&rose_list_lock);
211 		if (rose->device == dev) {
212 			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
213 			if (rose->neighbour)
214 				rose->neighbour->use--;
215 			dev_put(rose->device);
216 			rose->device = NULL;
217 		}
218 		spin_unlock_bh(&rose_list_lock);
219 		release_sock(sk);
220 		sock_put(sk);
221 		cond_resched();
222 	}
223 	if (rescan)
224 		goto start;
225 }
226 
227 /*
228  *	Handle device status changes.
229  */
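/*
 *	Only NETDEV_DOWN is of interest: a ROSE interface going down kills
 *	its sockets, while an underlying AX.25 interface going down tears
 *	down the links and routes that were using it.
 */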
230 static int rose_device_event(struct notifier_block *this,
231 			     unsigned long event, void *ptr)
232 {
233 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
234 
235 	if (!net_eq(dev_net(dev), &init_net))
236 		return NOTIFY_DONE;
237 
238 	if (event != NETDEV_DOWN)
239 		return NOTIFY_DONE;
240 
241 	switch (dev->type) {
242 	case ARPHRD_ROSE:
243 		rose_kill_by_device(dev);
244 		break;
245 	case ARPHRD_AX25:
246 		rose_link_device_down(dev);
247 		rose_rt_device_down(dev);
248 		break;
249 	}
250 
251 	return NOTIFY_DONE;
252 }
253 
254 /*
255  *	Add a socket to the bound sockets list.
256  */
257 static void rose_insert_socket(struct sock *sk)
258 {
259 
260 	spin_lock_bh(&rose_list_lock);
261 	sk_add_node(sk, &rose_list);
262 	spin_unlock_bh(&rose_list_lock);
263 }
264 
265 /*
266  *	Find a socket that wants to accept the Call Request we just
267  *	received.
268  */
269 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
270 {
271 	struct sock *s;
272 
273 	spin_lock_bh(&rose_list_lock);
274 	sk_for_each(s, &rose_list) {
275 		struct rose_sock *rose = rose_sk(s);
276 
277 		if (!rosecmp(&rose->source_addr, addr) &&
278 		    !ax25cmp(&rose->source_call, call) &&
279 		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
280 			goto found;
281 	}
282 
283 	sk_for_each(s, &rose_list) {
284 		struct rose_sock *rose = rose_sk(s);
285 
286 		if (!rosecmp(&rose->source_addr, addr) &&
287 		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
288 		    s->sk_state == TCP_LISTEN)
289 			goto found;
290 	}
291 	s = NULL;
292 found:
293 	spin_unlock_bh(&rose_list_lock);
294 	return s;
295 }
296 
297 /*
298  *	Find a connected ROSE socket given my LCI and device.
299  */
300 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
301 {
302 	struct sock *s;
303 
304 	spin_lock_bh(&rose_list_lock);
305 	sk_for_each(s, &rose_list) {
306 		struct rose_sock *rose = rose_sk(s);
307 
308 		if (rose->lci == lci && rose->neighbour == neigh)
309 			goto found;
310 	}
311 	s = NULL;
312 found:
313 	spin_unlock_bh(&rose_list_lock);
314 	return s;
315 }
316 
317 /*
318  *	Find a unique LCI for a given device.
319  */
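/*
 *	A DCE searches upwards from LCI 1 and a DTE downwards from the
 *	maximum, the usual X.25 arrangement for avoiding call collisions
 *	between the two ends of a link.
 */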
320 unsigned int rose_new_lci(struct rose_neigh *neigh)
321 {
322 	int lci;
323 
324 	if (neigh->dce_mode) {
325 		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
326 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
327 				return lci;
328 	} else {
329 		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
330 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
331 				return lci;
332 	}
333 
334 	return 0;
335 }
336 
337 /*
338  *	Deferred destroy.
339  */
340 void rose_destroy_socket(struct sock *);
341 
342 /*
343  *	Handler for deferred kills.
344  */
345 static void rose_destroy_timer(struct timer_list *t)
346 {
347 	struct sock *sk = from_timer(sk, t, sk_timer);
348 
349 	rose_destroy_socket(sk);
350 }
351 
352 /*
353  *	This is called from user mode and the timers. Thus it protects itself
354  *	against interrupt users but doesn't worry about being called during
355  *	work.  Once it is removed from the queue no interrupt or bottom half
356  *	will touch it and we are (fairly 8-) ) safe.
357  */
358 void rose_destroy_socket(struct sock *sk)
359 {
360 	struct sk_buff *skb;
361 
362 	rose_remove_socket(sk);
363 	rose_stop_heartbeat(sk);
364 	rose_stop_idletimer(sk);
365 	rose_stop_timer(sk);
366 
367 	rose_clear_queues(sk);		/* Flush the queues */
368 
369 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
370 		if (skb->sk != sk) {	/* A pending connection */
371 			/* Queue the unaccepted socket for death */
372 			sock_set_flag(skb->sk, SOCK_DEAD);
373 			rose_start_heartbeat(skb->sk);
374 			rose_sk(skb->sk)->state = ROSE_STATE_0;
375 		}
376 
377 		kfree_skb(skb);
378 	}
379 
380 	if (sk_has_allocations(sk)) {
381 		/* Defer: outstanding buffers */
382 		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
383 		sk->sk_timer.expires  = jiffies + 10 * HZ;
384 		add_timer(&sk->sk_timer);
385 	} else
386 		sock_put(sk);
387 }
388 
389 /*
390  *	Handling for system calls applied via the various interfaces to a
391  *	ROSE socket object.
392  */
393 
394 static int rose_setsockopt(struct socket *sock, int level, int optname,
395 		sockptr_t optval, unsigned int optlen)
396 {
397 	struct sock *sk = sock->sk;
398 	struct rose_sock *rose = rose_sk(sk);
399 	int opt;
400 
401 	if (level != SOL_ROSE)
402 		return -ENOPROTOOPT;
403 
404 	if (optlen < sizeof(int))
405 		return -EINVAL;
406 
407 	if (copy_from_sockptr(&opt, optval, sizeof(int)))
408 		return -EFAULT;
409 
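	/* T1, T2, T3 and HOLDBACK are given in seconds, IDLE in minutes. */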
410 	switch (optname) {
411 	case ROSE_DEFER:
412 		rose->defer = opt ? 1 : 0;
413 		return 0;
414 
415 	case ROSE_T1:
416 		if (opt < 1)
417 			return -EINVAL;
418 		rose->t1 = opt * HZ;
419 		return 0;
420 
421 	case ROSE_T2:
422 		if (opt < 1)
423 			return -EINVAL;
424 		rose->t2 = opt * HZ;
425 		return 0;
426 
427 	case ROSE_T3:
428 		if (opt < 1)
429 			return -EINVAL;
430 		rose->t3 = opt * HZ;
431 		return 0;
432 
433 	case ROSE_HOLDBACK:
434 		if (opt < 1)
435 			return -EINVAL;
436 		rose->hb = opt * HZ;
437 		return 0;
438 
439 	case ROSE_IDLE:
440 		if (opt < 0)
441 			return -EINVAL;
442 		rose->idle = opt * 60 * HZ;
443 		return 0;
444 
445 	case ROSE_QBITINCL:
446 		rose->qbitincl = opt ? 1 : 0;
447 		return 0;
448 
449 	default:
450 		return -ENOPROTOOPT;
451 	}
452 }
453 
454 static int rose_getsockopt(struct socket *sock, int level, int optname,
455 	char __user *optval, int __user *optlen)
456 {
457 	struct sock *sk = sock->sk;
458 	struct rose_sock *rose = rose_sk(sk);
459 	int val = 0;
460 	int len;
461 
462 	if (level != SOL_ROSE)
463 		return -ENOPROTOOPT;
464 
465 	if (get_user(len, optlen))
466 		return -EFAULT;
467 
468 	if (len < 0)
469 		return -EINVAL;
470 
471 	switch (optname) {
472 	case ROSE_DEFER:
473 		val = rose->defer;
474 		break;
475 
476 	case ROSE_T1:
477 		val = rose->t1 / HZ;
478 		break;
479 
480 	case ROSE_T2:
481 		val = rose->t2 / HZ;
482 		break;
483 
484 	case ROSE_T3:
485 		val = rose->t3 / HZ;
486 		break;
487 
488 	case ROSE_HOLDBACK:
489 		val = rose->hb / HZ;
490 		break;
491 
492 	case ROSE_IDLE:
493 		val = rose->idle / (60 * HZ);
494 		break;
495 
496 	case ROSE_QBITINCL:
497 		val = rose->qbitincl;
498 		break;
499 
500 	default:
501 		return -ENOPROTOOPT;
502 	}
503 
504 	len = min_t(unsigned int, len, sizeof(int));
505 
506 	if (put_user(len, optlen))
507 		return -EFAULT;
508 
509 	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
510 }
511 
512 static int rose_listen(struct socket *sock, int backlog)
513 {
514 	struct sock *sk = sock->sk;
515 
516 	lock_sock(sk);
517 	if (sock->state != SS_UNCONNECTED) {
518 		release_sock(sk);
519 		return -EINVAL;
520 	}
521 
522 	if (sk->sk_state != TCP_LISTEN) {
523 		struct rose_sock *rose = rose_sk(sk);
524 
525 		rose->dest_ndigis = 0;
526 		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
527 		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
528 		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
529 		sk->sk_max_ack_backlog = backlog;
530 		sk->sk_state           = TCP_LISTEN;
531 		release_sock(sk);
532 		return 0;
533 	}
534 	release_sock(sk);
535 
536 	return -EOPNOTSUPP;
537 }
538 
539 static struct proto rose_proto = {
540 	.name	  = "ROSE",
541 	.owner	  = THIS_MODULE,
542 	.obj_size = sizeof(struct rose_sock),
543 };
544 
545 static int rose_create(struct net *net, struct socket *sock, int protocol,
546 		       int kern)
547 {
548 	struct sock *sk;
549 	struct rose_sock *rose;
550 
551 	if (!net_eq(net, &init_net))
552 		return -EAFNOSUPPORT;
553 
554 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
555 		return -ESOCKTNOSUPPORT;
556 
557 	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
558 	if (sk == NULL)
559 		return -ENOMEM;
560 
561 	rose = rose_sk(sk);
562 
563 	sock_init_data(sock, sk);
564 
565 	skb_queue_head_init(&rose->ack_queue);
566 #ifdef M_BIT
567 	skb_queue_head_init(&rose->frag_queue);
568 	rose->fraglen    = 0;
569 #endif
570 
571 	sock->ops    = &rose_proto_ops;
572 	sk->sk_protocol = protocol;
573 
574 	timer_setup(&rose->timer, NULL, 0);
575 	timer_setup(&rose->idletimer, NULL, 0);
576 
577 	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
578 	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
579 	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
580 	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
581 	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
582 
583 	rose->state = ROSE_STATE_0;
584 
585 	return 0;
586 }
587 
588 static struct sock *rose_make_new(struct sock *osk)
589 {
590 	struct sock *sk;
591 	struct rose_sock *rose, *orose;
592 
593 	if (osk->sk_type != SOCK_SEQPACKET)
594 		return NULL;
595 
596 	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
597 	if (sk == NULL)
598 		return NULL;
599 
600 	rose = rose_sk(sk);
601 
602 	sock_init_data(NULL, sk);
603 
604 	skb_queue_head_init(&rose->ack_queue);
605 #ifdef M_BIT
606 	skb_queue_head_init(&rose->frag_queue);
607 	rose->fraglen  = 0;
608 #endif
609 
610 	sk->sk_type     = osk->sk_type;
611 	sk->sk_priority = osk->sk_priority;
612 	sk->sk_protocol = osk->sk_protocol;
613 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
614 	sk->sk_sndbuf   = osk->sk_sndbuf;
615 	sk->sk_state    = TCP_ESTABLISHED;
616 	sock_copy_flags(sk, osk);
617 
618 	timer_setup(&rose->timer, NULL, 0);
619 	timer_setup(&rose->idletimer, NULL, 0);
620 
621 	orose		= rose_sk(osk);
622 	rose->t1	= orose->t1;
623 	rose->t2	= orose->t2;
624 	rose->t3	= orose->t3;
625 	rose->hb	= orose->hb;
626 	rose->idle	= orose->idle;
627 	rose->defer	= orose->defer;
628 	rose->device	= orose->device;
629 	if (rose->device)
630 		dev_hold(rose->device);
631 	rose->qbitincl	= orose->qbitincl;
632 
633 	return sk;
634 }
635 
636 static int rose_release(struct socket *sock)
637 {
638 	struct sock *sk = sock->sk;
639 	struct rose_sock *rose;
640 
641 	if (sk == NULL) return 0;
642 
643 	sock_hold(sk);
644 	sock_orphan(sk);
645 	lock_sock(sk);
646 	rose = rose_sk(sk);
647 
648 	switch (rose->state) {
649 	case ROSE_STATE_0:
650 		release_sock(sk);
651 		rose_disconnect(sk, 0, -1, -1);
652 		lock_sock(sk);
653 		rose_destroy_socket(sk);
654 		break;
655 
656 	case ROSE_STATE_2:
657 		rose->neighbour->use--;
658 		release_sock(sk);
659 		rose_disconnect(sk, 0, -1, -1);
660 		lock_sock(sk);
661 		rose_destroy_socket(sk);
662 		break;
663 
664 	case ROSE_STATE_1:
665 	case ROSE_STATE_3:
666 	case ROSE_STATE_4:
667 	case ROSE_STATE_5:
668 		rose_clear_queues(sk);
669 		rose_stop_idletimer(sk);
670 		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
671 		rose_start_t3timer(sk);
672 		rose->state  = ROSE_STATE_2;
673 		sk->sk_state    = TCP_CLOSE;
674 		sk->sk_shutdown |= SEND_SHUTDOWN;
675 		sk->sk_state_change(sk);
676 		sock_set_flag(sk, SOCK_DEAD);
677 		sock_set_flag(sk, SOCK_DESTROY);
678 		break;
679 
680 	default:
681 		break;
682 	}
683 
684 	spin_lock_bh(&rose_list_lock);
685 	dev_put(rose->device);
686 	rose->device = NULL;
687 	spin_unlock_bh(&rose_list_lock);
688 	sock->sk = NULL;
689 	release_sock(sk);
690 	sock_put(sk);
691 
692 	return 0;
693 }
694 
695 static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
696 {
697 	struct sock *sk = sock->sk;
698 	struct rose_sock *rose = rose_sk(sk);
699 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
700 	struct net_device *dev;
701 	ax25_address *source;
702 	ax25_uid_assoc *user;
703 	int n;
704 
705 	if (!sock_flag(sk, SOCK_ZAPPED))
706 		return -EINVAL;
707 
708 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
709 		return -EINVAL;
710 
711 	if (addr->srose_family != AF_ROSE)
712 		return -EINVAL;
713 
714 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
715 		return -EINVAL;
716 
717 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
718 		return -EINVAL;
719 
720 	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
721 		return -EADDRNOTAVAIL;
722 
723 	source = &addr->srose_call;
724 
725 	user = ax25_findbyuid(current_euid());
726 	if (user) {
727 		rose->source_call = user->call;
728 		ax25_uid_put(user);
729 	} else {
730 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
731 			dev_put(dev);
732 			return -EACCES;
733 		}
734 		rose->source_call   = *source;
735 	}
736 
737 	rose->source_addr   = addr->srose_addr;
738 	rose->device        = dev;
739 	rose->source_ndigis = addr->srose_ndigis;
740 
741 	if (addr_len == sizeof(struct full_sockaddr_rose)) {
742 		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
743 		for (n = 0 ; n < addr->srose_ndigis ; n++)
744 			rose->source_digis[n] = full_addr->srose_digis[n];
745 	} else {
746 		if (rose->source_ndigis == 1) {
747 			rose->source_digis[0] = addr->srose_digi;
748 		}
749 	}
750 
751 	rose_insert_socket(sk);
752 
753 	sock_reset_flag(sk, SOCK_ZAPPED);
754 
755 	return 0;
756 }
757 
758 static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
759 {
760 	struct sock *sk = sock->sk;
761 	struct rose_sock *rose = rose_sk(sk);
762 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
763 	unsigned char cause, diagnostic;
764 	ax25_uid_assoc *user;
765 	int n, err = 0;
766 
767 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
768 		return -EINVAL;
769 
770 	if (addr->srose_family != AF_ROSE)
771 		return -EINVAL;
772 
773 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
774 		return -EINVAL;
775 
776 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
777 		return -EINVAL;
778 
779 	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
780 	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
781 		return -EINVAL;
782 
783 	lock_sock(sk);
784 
785 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
786 		/* Connect completed during a ERESTARTSYS event */
787 		sock->state = SS_CONNECTED;
788 		goto out_release;
789 	}
790 
791 	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
792 		sock->state = SS_UNCONNECTED;
793 		err = -ECONNREFUSED;
794 		goto out_release;
795 	}
796 
797 	if (sk->sk_state == TCP_ESTABLISHED) {
798 		/* No reconnect on a seqpacket socket */
799 		err = -EISCONN;
800 		goto out_release;
801 	}
802 
803 	sk->sk_state   = TCP_CLOSE;
804 	sock->state = SS_UNCONNECTED;
805 
806 	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
807 					 &diagnostic, 0);
808 	if (!rose->neighbour) {
809 		err = -ENETUNREACH;
810 		goto out_release;
811 	}
812 
813 	rose->lci = rose_new_lci(rose->neighbour);
814 	if (!rose->lci) {
815 		err = -ENETUNREACH;
816 		goto out_release;
817 	}
818 
819 	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
820 		struct net_device *dev;
821 
822 		sock_reset_flag(sk, SOCK_ZAPPED);
823 
824 		dev = rose_dev_first();
825 		if (!dev) {
826 			err = -ENETUNREACH;
827 			goto out_release;
828 		}
829 
830 		user = ax25_findbyuid(current_euid());
831 		if (!user) {
832 			err = -EINVAL;
833 			dev_put(dev);
834 			goto out_release;
835 		}
836 
837 		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
838 		rose->source_call = user->call;
839 		rose->device      = dev;
840 		ax25_uid_put(user);
841 
842 		rose_insert_socket(sk);		/* Finish the bind */
843 	}
844 	rose->dest_addr   = addr->srose_addr;
845 	rose->dest_call   = addr->srose_call;
846 	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
847 	rose->dest_ndigis = addr->srose_ndigis;
848 
849 	if (addr_len == sizeof(struct full_sockaddr_rose)) {
850 		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
851 		for (n = 0 ; n < addr->srose_ndigis ; n++)
852 			rose->dest_digis[n] = full_addr->srose_digis[n];
853 	} else {
854 		if (rose->dest_ndigis == 1) {
855 			rose->dest_digis[0] = addr->srose_digi;
856 		}
857 	}
858 
859 	/* Move to connecting socket, start sending Connect Requests */
860 	sock->state   = SS_CONNECTING;
861 	sk->sk_state     = TCP_SYN_SENT;
862 
863 	rose->state = ROSE_STATE_1;
864 
865 	rose->neighbour->use++;
866 
867 	rose_write_internal(sk, ROSE_CALL_REQUEST);
868 	rose_start_heartbeat(sk);
869 	rose_start_t1timer(sk);
870 
871 	/* Now the loop */
872 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
873 		err = -EINPROGRESS;
874 		goto out_release;
875 	}
876 
877 	/*
878 	 * A Clear Request, a timeout or a routing failure will move the
879 	 * socket to closed.
880 	 */
881 	if (sk->sk_state == TCP_SYN_SENT) {
882 		DEFINE_WAIT(wait);
883 
884 		for (;;) {
885 			prepare_to_wait(sk_sleep(sk), &wait,
886 					TASK_INTERRUPTIBLE);
887 			if (sk->sk_state != TCP_SYN_SENT)
888 				break;
889 			if (!signal_pending(current)) {
890 				release_sock(sk);
891 				schedule();
892 				lock_sock(sk);
893 				continue;
894 			}
895 			err = -ERESTARTSYS;
896 			break;
897 		}
898 		finish_wait(sk_sleep(sk), &wait);
899 
900 		if (err)
901 			goto out_release;
902 	}
903 
904 	if (sk->sk_state != TCP_ESTABLISHED) {
905 		sock->state = SS_UNCONNECTED;
906 		err = sock_error(sk);	/* Always set at this point */
907 		goto out_release;
908 	}
909 
910 	sock->state = SS_CONNECTED;
911 
912 out_release:
913 	release_sock(sk);
914 
915 	return err;
916 }
917 
918 static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
919 		       bool kern)
920 {
921 	struct sk_buff *skb;
922 	struct sock *newsk;
923 	DEFINE_WAIT(wait);
924 	struct sock *sk;
925 	int err = 0;
926 
927 	if ((sk = sock->sk) == NULL)
928 		return -EINVAL;
929 
930 	lock_sock(sk);
931 	if (sk->sk_type != SOCK_SEQPACKET) {
932 		err = -EOPNOTSUPP;
933 		goto out_release;
934 	}
935 
936 	if (sk->sk_state != TCP_LISTEN) {
937 		err = -EINVAL;
938 		goto out_release;
939 	}
940 
941 	/*
942 	 *	The receive queue this time is holding sockets ready to use
943 	 *	hooked into the Call Request we saved
944 	 */
945 	for (;;) {
946 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
947 
948 		skb = skb_dequeue(&sk->sk_receive_queue);
949 		if (skb)
950 			break;
951 
952 		if (flags & O_NONBLOCK) {
953 			err = -EWOULDBLOCK;
954 			break;
955 		}
956 		if (!signal_pending(current)) {
957 			release_sock(sk);
958 			schedule();
959 			lock_sock(sk);
960 			continue;
961 		}
962 		err = -ERESTARTSYS;
963 		break;
964 	}
965 	finish_wait(sk_sleep(sk), &wait);
966 	if (err)
967 		goto out_release;
968 
969 	newsk = skb->sk;
970 	sock_graft(newsk, newsock);
971 
972 	/* Now attach up the new socket */
973 	skb->sk = NULL;
974 	kfree_skb(skb);
975 	sk_acceptq_removed(sk);
976 
977 out_release:
978 	release_sock(sk);
979 
980 	return err;
981 }
982 
983 static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
984 	int peer)
985 {
986 	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
987 	struct sock *sk = sock->sk;
988 	struct rose_sock *rose = rose_sk(sk);
989 	int n;
990 
991 	memset(srose, 0, sizeof(*srose));
992 	if (peer != 0) {
993 		if (sk->sk_state != TCP_ESTABLISHED)
994 			return -ENOTCONN;
995 		srose->srose_family = AF_ROSE;
996 		srose->srose_addr   = rose->dest_addr;
997 		srose->srose_call   = rose->dest_call;
998 		srose->srose_ndigis = rose->dest_ndigis;
999 		for (n = 0; n < rose->dest_ndigis; n++)
1000 			srose->srose_digis[n] = rose->dest_digis[n];
1001 	} else {
1002 		srose->srose_family = AF_ROSE;
1003 		srose->srose_addr   = rose->source_addr;
1004 		srose->srose_call   = rose->source_call;
1005 		srose->srose_ndigis = rose->source_ndigis;
1006 		for (n = 0; n < rose->source_ndigis; n++)
1007 			srose->srose_digis[n] = rose->source_digis[n];
1008 	}
1009 
1010 	return sizeof(struct full_sockaddr_rose);
1011 }
1012 
1013 int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
1014 {
1015 	struct sock *sk;
1016 	struct sock *make;
1017 	struct rose_sock *make_rose;
1018 	struct rose_facilities_struct facilities;
1019 	int n;
1020 
1021 	skb->sk = NULL;		/* Initially we don't know who it's for */
1022 
1023 	/*
1024 	 *	skb->data points to the rose frame start
1025 	 */
1026 	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
1027 
1028 	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
1029 				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
1030 				   &facilities)) {
1031 		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
1032 		return 0;
1033 	}
1034 
1035 	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
1036 
1037 	/*
1038 	 * We can't accept the Call Request.
1039 	 */
1040 	if (sk == NULL || sk_acceptq_is_full(sk) ||
1041 	    (make = rose_make_new(sk)) == NULL) {
1042 		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
1043 		return 0;
1044 	}
1045 
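	/*
	 *	"make" is the new socket for this call: fill it in from the
	 *	parsed facilities, queue the Call Request skb on the listener
	 *	and wake the listener via sk_data_ready().
	 */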
1046 	skb->sk     = make;
1047 	make->sk_state = TCP_ESTABLISHED;
1048 	make_rose = rose_sk(make);
1049 
1050 	make_rose->lci           = lci;
1051 	make_rose->dest_addr     = facilities.dest_addr;
1052 	make_rose->dest_call     = facilities.dest_call;
1053 	make_rose->dest_ndigis   = facilities.dest_ndigis;
1054 	for (n = 0 ; n < facilities.dest_ndigis ; n++)
1055 		make_rose->dest_digis[n] = facilities.dest_digis[n];
1056 	make_rose->source_addr   = facilities.source_addr;
1057 	make_rose->source_call   = facilities.source_call;
1058 	make_rose->source_ndigis = facilities.source_ndigis;
1059 	for (n = 0 ; n < facilities.source_ndigis ; n++)
1060 		make_rose->source_digis[n] = facilities.source_digis[n];
1061 	make_rose->neighbour     = neigh;
1062 	make_rose->device        = dev;
1063 	make_rose->facilities    = facilities;
1064 
1065 	make_rose->neighbour->use++;
1066 
1067 	if (rose_sk(sk)->defer) {
1068 		make_rose->state = ROSE_STATE_5;
1069 	} else {
1070 		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1071 		make_rose->state = ROSE_STATE_3;
1072 		rose_start_idletimer(make);
1073 	}
1074 
1075 	make_rose->condition = 0x00;
1076 	make_rose->vs        = 0;
1077 	make_rose->va        = 0;
1078 	make_rose->vr        = 0;
1079 	make_rose->vl        = 0;
1080 	sk_acceptq_added(sk);
1081 
1082 	rose_insert_socket(make);
1083 
1084 	skb_queue_head(&sk->sk_receive_queue, skb);
1085 
1086 	rose_start_heartbeat(make);
1087 
1088 	if (!sock_flag(sk, SOCK_DEAD))
1089 		sk->sk_data_ready(sk);
1090 
1091 	return 1;
1092 }
1093 
1094 static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1095 {
1096 	struct sock *sk = sock->sk;
1097 	struct rose_sock *rose = rose_sk(sk);
1098 	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1099 	int err;
1100 	struct full_sockaddr_rose srose;
1101 	struct sk_buff *skb;
1102 	unsigned char *asmptr;
1103 	int n, size, qbit = 0;
1104 
1105 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1106 		return -EINVAL;
1107 
1108 	if (sock_flag(sk, SOCK_ZAPPED))
1109 		return -EADDRNOTAVAIL;
1110 
1111 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1112 		send_sig(SIGPIPE, current, 0);
1113 		return -EPIPE;
1114 	}
1115 
1116 	if (rose->neighbour == NULL || rose->device == NULL)
1117 		return -ENETUNREACH;
1118 
1119 	if (usrose != NULL) {
1120 		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1121 			return -EINVAL;
1122 		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1123 		memcpy(&srose, usrose, msg->msg_namelen);
1124 		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1125 		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1126 			return -EISCONN;
1127 		if (srose.srose_ndigis != rose->dest_ndigis)
1128 			return -EISCONN;
1129 		if (srose.srose_ndigis == rose->dest_ndigis) {
1130 			for (n = 0 ; n < srose.srose_ndigis ; n++)
1131 				if (ax25cmp(&rose->dest_digis[n],
1132 					    &srose.srose_digis[n]))
1133 					return -EISCONN;
1134 		}
1135 		if (srose.srose_family != AF_ROSE)
1136 			return -EINVAL;
1137 	} else {
1138 		if (sk->sk_state != TCP_ESTABLISHED)
1139 			return -ENOTCONN;
1140 
1141 		srose.srose_family = AF_ROSE;
1142 		srose.srose_addr   = rose->dest_addr;
1143 		srose.srose_call   = rose->dest_call;
1144 		srose.srose_ndigis = rose->dest_ndigis;
1145 		for (n = 0 ; n < rose->dest_ndigis ; n++)
1146 			srose.srose_digis[n] = rose->dest_digis[n];
1147 	}
1148 
1149 	/* Build a packet */
1150 	/* Sanity check the packet size */
1151 	if (len > 65535)
1152 		return -EMSGSIZE;
1153 
1154 	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1155 
1156 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1157 		return err;
1158 
1159 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1160 
1161 	/*
1162 	 *	Put the data on the end
1163 	 */
1164 
1165 	skb_reset_transport_header(skb);
1166 	skb_put(skb, len);
1167 
1168 	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1169 	if (err) {
1170 		kfree_skb(skb);
1171 		return err;
1172 	}
1173 
1174 	/*
1175 	 *	If the Q BIT Include socket option is in force, the first
1176 	 *	byte of the user data is the logical value of the Q Bit.
1177 	 */
1178 	if (rose->qbitincl) {
1179 		qbit = skb->data[0];
1180 		skb_pull(skb, 1);
1181 	}
1182 
1183 	/*
1184 	 *	Push down the ROSE header
1185 	 */
1186 	asmptr = skb_push(skb, ROSE_MIN_LEN);
1187 
1188 	/* Build a ROSE Network header */
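	/*
	 *	Byte 0: GFI plus the top four bits of the LCI (and the Q bit,
	 *	if requested, below), byte 1: the low eight bits of the LCI,
	 *	byte 2: the packet type (ROSE_DATA).
	 */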
1189 	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1190 	asmptr[1] = (rose->lci >> 0) & 0xFF;
1191 	asmptr[2] = ROSE_DATA;
1192 
1193 	if (qbit)
1194 		asmptr[0] |= ROSE_Q_BIT;
1195 
1196 	if (sk->sk_state != TCP_ESTABLISHED) {
1197 		kfree_skb(skb);
1198 		return -ENOTCONN;
1199 	}
1200 
1201 #ifdef M_BIT
1202 #define ROSE_PACLEN (256-ROSE_MIN_LEN)
1203 	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1204 		unsigned char header[ROSE_MIN_LEN];
1205 		struct sk_buff *skbn;
1206 		int frontlen;
1207 		int lg;
1208 
1209 		/* Save a copy of the Header */
1210 		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1211 		skb_pull(skb, ROSE_MIN_LEN);
1212 
1213 		frontlen = skb_headroom(skb);
1214 
1215 		while (skb->len > 0) {
1216 			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1217 				kfree_skb(skb);
1218 				return err;
1219 			}
1220 
1221 			skbn->sk   = sk;
1222 			skbn->free = 1;
1223 			skbn->arp  = 1;
1224 
1225 			skb_reserve(skbn, frontlen);
1226 
1227 			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1228 
1229 			/* Copy the user data */
1230 			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1231 			skb_pull(skb, lg);
1232 
1233 			/* Duplicate the Header */
1234 			skb_push(skbn, ROSE_MIN_LEN);
1235 			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1236 
1237 			if (skb->len > 0)
1238 				skbn->data[2] |= M_BIT;
1239 
1240 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1241 		}
1242 
1243 		skb->free = 1;
1244 		kfree_skb(skb);
1245 	} else {
1246 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1247 	}
1248 #else
1249 	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1250 #endif
1251 
1252 	rose_kick(sk);
1253 
1254 	return len;
1255 }
1256 
1257 
1258 static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1259 			int flags)
1260 {
1261 	struct sock *sk = sock->sk;
1262 	struct rose_sock *rose = rose_sk(sk);
1263 	size_t copied;
1264 	unsigned char *asmptr;
1265 	struct sk_buff *skb;
1266 	int n, er, qbit;
1267 
1268 	/*
1269 	 * This works for seqpacket too. The receiver has ordered the queue for
1270 	 * us! We do one quick check first though
1271 	 */
1272 	if (sk->sk_state != TCP_ESTABLISHED)
1273 		return -ENOTCONN;
1274 
1275 	/* Now we can treat all alike */
1276 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1277 		return er;
1278 
1279 	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1280 
1281 	skb_pull(skb, ROSE_MIN_LEN);
1282 
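	/*
	 *	With ROSE_QBITINCL set, the received Q bit is handed to the
	 *	reader as an extra leading data byte, mirroring the convention
	 *	used on transmit in rose_sendmsg().
	 */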
1283 	if (rose->qbitincl) {
1284 		asmptr  = skb_push(skb, 1);
1285 		*asmptr = qbit;
1286 	}
1287 
1288 	skb_reset_transport_header(skb);
1289 	copied     = skb->len;
1290 
1291 	if (copied > size) {
1292 		copied = size;
1293 		msg->msg_flags |= MSG_TRUNC;
1294 	}
1295 
1296 	skb_copy_datagram_msg(skb, 0, msg, copied);
1297 
1298 	if (msg->msg_name) {
1299 		struct sockaddr_rose *srose;
1300 		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1301 				 msg->msg_name);
1302 
1303 		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1304 		srose = msg->msg_name;
1305 		srose->srose_family = AF_ROSE;
1306 		srose->srose_addr   = rose->dest_addr;
1307 		srose->srose_call   = rose->dest_call;
1308 		srose->srose_ndigis = rose->dest_ndigis;
1309 		for (n = 0 ; n < rose->dest_ndigis ; n++)
1310 			full_srose->srose_digis[n] = rose->dest_digis[n];
1311 		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1312 	}
1313 
1314 	skb_free_datagram(sk, skb);
1315 
1316 	return copied;
1317 }
1318 
1319 
1320 static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1321 {
1322 	struct sock *sk = sock->sk;
1323 	struct rose_sock *rose = rose_sk(sk);
1324 	void __user *argp = (void __user *)arg;
1325 
1326 	switch (cmd) {
1327 	case TIOCOUTQ: {
1328 		long amount;
1329 
1330 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1331 		if (amount < 0)
1332 			amount = 0;
1333 		return put_user(amount, (unsigned int __user *) argp);
1334 	}
1335 
1336 	case TIOCINQ: {
1337 		struct sk_buff *skb;
1338 		long amount = 0L;
1339 
1340 		spin_lock_irq(&sk->sk_receive_queue.lock);
1341 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1342 			amount = skb->len;
1343 		spin_unlock_irq(&sk->sk_receive_queue.lock);
1344 		return put_user(amount, (unsigned int __user *) argp);
1345 	}
1346 
1347 	case SIOCGIFADDR:
1348 	case SIOCSIFADDR:
1349 	case SIOCGIFDSTADDR:
1350 	case SIOCSIFDSTADDR:
1351 	case SIOCGIFBRDADDR:
1352 	case SIOCSIFBRDADDR:
1353 	case SIOCGIFNETMASK:
1354 	case SIOCSIFNETMASK:
1355 	case SIOCGIFMETRIC:
1356 	case SIOCSIFMETRIC:
1357 		return -EINVAL;
1358 
1359 	case SIOCADDRT:
1360 	case SIOCDELRT:
1361 	case SIOCRSCLRRT:
1362 		if (!capable(CAP_NET_ADMIN))
1363 			return -EPERM;
1364 		return rose_rt_ioctl(cmd, argp);
1365 
1366 	case SIOCRSGCAUSE: {
1367 		struct rose_cause_struct rose_cause;
1368 		rose_cause.cause      = rose->cause;
1369 		rose_cause.diagnostic = rose->diagnostic;
1370 		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1371 	}
1372 
1373 	case SIOCRSSCAUSE: {
1374 		struct rose_cause_struct rose_cause;
1375 		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1376 			return -EFAULT;
1377 		rose->cause      = rose_cause.cause;
1378 		rose->diagnostic = rose_cause.diagnostic;
1379 		return 0;
1380 	}
1381 
1382 	case SIOCRSSL2CALL:
1383 		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1384 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1385 			ax25_listen_release(&rose_callsign, NULL);
1386 		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1387 			return -EFAULT;
1388 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1389 			return ax25_listen_register(&rose_callsign, NULL);
1390 
1391 		return 0;
1392 
1393 	case SIOCRSGL2CALL:
1394 		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1395 
1396 	case SIOCRSACCEPT:
1397 		if (rose->state == ROSE_STATE_5) {
1398 			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1399 			rose_start_idletimer(sk);
1400 			rose->condition = 0x00;
1401 			rose->vs        = 0;
1402 			rose->va        = 0;
1403 			rose->vr        = 0;
1404 			rose->vl        = 0;
1405 			rose->state     = ROSE_STATE_3;
1406 		}
1407 		return 0;
1408 
1409 	default:
1410 		return -ENOIOCTLCMD;
1411 	}
1412 
1413 	return 0;
1414 }
1415 
1416 #ifdef CONFIG_PROC_FS
1417 static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1418 	__acquires(rose_list_lock)
1419 {
1420 	spin_lock_bh(&rose_list_lock);
1421 	return seq_hlist_start_head(&rose_list, *pos);
1422 }
1423 
1424 static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1425 {
1426 	return seq_hlist_next(v, &rose_list, pos);
1427 }
1428 
1429 static void rose_info_stop(struct seq_file *seq, void *v)
1430 	__releases(rose_list_lock)
1431 {
1432 	spin_unlock_bh(&rose_list_lock);
1433 }
1434 
1435 static int rose_info_show(struct seq_file *seq, void *v)
1436 {
1437 	char buf[11], rsbuf[11];
1438 
1439 	if (v == SEQ_START_TOKEN)
1440 		seq_puts(seq,
1441 			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1442 
1443 	else {
1444 		struct sock *s = sk_entry(v);
1445 		struct rose_sock *rose = rose_sk(s);
1446 		const char *devname, *callsign;
1447 		const struct net_device *dev = rose->device;
1448 
1449 		if (!dev)
1450 			devname = "???";
1451 		else
1452 			devname = dev->name;
1453 
1454 		seq_printf(seq, "%-10s %-9s ",
1455 			   rose2asc(rsbuf, &rose->dest_addr),
1456 			   ax2asc(buf, &rose->dest_call));
1457 
1458 		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1459 			callsign = "??????-?";
1460 		else
1461 			callsign = ax2asc(buf, &rose->source_call);
1462 
1463 		seq_printf(seq,
1464 			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1465 			rose2asc(rsbuf, &rose->source_addr),
1466 			callsign,
1467 			devname,
1468 			rose->lci & 0x0FFF,
1469 			(rose->neighbour) ? rose->neighbour->number : 0,
1470 			rose->state,
1471 			rose->vs,
1472 			rose->vr,
1473 			rose->va,
1474 			ax25_display_timer(&rose->timer) / HZ,
1475 			rose->t1 / HZ,
1476 			rose->t2 / HZ,
1477 			rose->t3 / HZ,
1478 			rose->hb / HZ,
1479 			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1480 			rose->idle / (60 * HZ),
1481 			sk_wmem_alloc_get(s),
1482 			sk_rmem_alloc_get(s),
1483 			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1484 	}
1485 
1486 	return 0;
1487 }
1488 
1489 static const struct seq_operations rose_info_seqops = {
1490 	.start = rose_info_start,
1491 	.next = rose_info_next,
1492 	.stop = rose_info_stop,
1493 	.show = rose_info_show,
1494 };
1495 #endif	/* CONFIG_PROC_FS */
1496 
1497 static const struct net_proto_family rose_family_ops = {
1498 	.family		=	PF_ROSE,
1499 	.create		=	rose_create,
1500 	.owner		=	THIS_MODULE,
1501 };
1502 
1503 static const struct proto_ops rose_proto_ops = {
1504 	.family		=	PF_ROSE,
1505 	.owner		=	THIS_MODULE,
1506 	.release	=	rose_release,
1507 	.bind		=	rose_bind,
1508 	.connect	=	rose_connect,
1509 	.socketpair	=	sock_no_socketpair,
1510 	.accept		=	rose_accept,
1511 	.getname	=	rose_getname,
1512 	.poll		=	datagram_poll,
1513 	.ioctl		=	rose_ioctl,
1514 	.gettstamp	=	sock_gettstamp,
1515 	.listen		=	rose_listen,
1516 	.shutdown	=	sock_no_shutdown,
1517 	.setsockopt	=	rose_setsockopt,
1518 	.getsockopt	=	rose_getsockopt,
1519 	.sendmsg	=	rose_sendmsg,
1520 	.recvmsg	=	rose_recvmsg,
1521 	.mmap		=	sock_no_mmap,
1522 	.sendpage	=	sock_no_sendpage,
1523 };
1524 
1525 static struct notifier_block rose_dev_notifier = {
1526 	.notifier_call	=	rose_device_event,
1527 };
1528 
1529 static struct net_device **dev_rose;
1530 
1531 static struct ax25_protocol rose_pid = {
1532 	.pid	= AX25_P_ROSE,
1533 	.func	= rose_route_frame
1534 };
1535 
1536 static struct ax25_linkfail rose_linkfail_notifier = {
1537 	.func	= rose_link_failed
1538 };
1539 
1540 static int __init rose_proto_init(void)
1541 {
1542 	int i;
1543 	int rc;
1544 
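	/*
	 *	Bound the rose_ndevs module parameter so that the size of the
	 *	device pointer array allocated below cannot overflow.
	 */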
1545 	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1546 		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1547 		rc = -EINVAL;
1548 		goto out;
1549 	}
1550 
1551 	rc = proto_register(&rose_proto, 0);
1552 	if (rc != 0)
1553 		goto out;
1554 
1555 	rose_callsign = null_ax25_address;
1556 
1557 	dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
1558 			   GFP_KERNEL);
1559 	if (dev_rose == NULL) {
1560 		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1561 		rc = -ENOMEM;
1562 		goto out_proto_unregister;
1563 	}
1564 
1565 	for (i = 0; i < rose_ndevs; i++) {
1566 		struct net_device *dev;
1567 		char name[IFNAMSIZ];
1568 
1569 		sprintf(name, "rose%d", i);
1570 		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
1571 		if (!dev) {
1572 			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1573 			rc = -ENOMEM;
1574 			goto fail;
1575 		}
1576 		rc = register_netdev(dev);
1577 		if (rc) {
1578 			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1579 			free_netdev(dev);
1580 			goto fail;
1581 		}
1582 		rose_set_lockdep_key(dev);
1583 		dev_rose[i] = dev;
1584 	}
1585 
1586 	sock_register(&rose_family_ops);
1587 	register_netdevice_notifier(&rose_dev_notifier);
1588 
1589 	ax25_register_pid(&rose_pid);
1590 	ax25_linkfail_register(&rose_linkfail_notifier);
1591 
1592 #ifdef CONFIG_SYSCTL
1593 	rose_register_sysctl();
1594 #endif
1595 	rose_loopback_init();
1596 
1597 	rose_add_loopback_neigh();
1598 
1599 	proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
1600 	proc_create_seq("rose_neigh", 0444, init_net.proc_net,
1601 		    &rose_neigh_seqops);
1602 	proc_create_seq("rose_nodes", 0444, init_net.proc_net,
1603 		    &rose_node_seqops);
1604 	proc_create_seq("rose_routes", 0444, init_net.proc_net,
1605 		    &rose_route_seqops);
1606 out:
1607 	return rc;
1608 fail:
1609 	while (--i >= 0) {
1610 		unregister_netdev(dev_rose[i]);
1611 		free_netdev(dev_rose[i]);
1612 	}
1613 	kfree(dev_rose);
1614 out_proto_unregister:
1615 	proto_unregister(&rose_proto);
1616 	goto out;
1617 }
1618 module_init(rose_proto_init);
1619 
1620 module_param(rose_ndevs, int, 0);
1621 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1622 
1623 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1624 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1625 MODULE_LICENSE("GPL");
1626 MODULE_ALIAS_NETPROTO(PF_ROSE);
1627 
1628 static void __exit rose_exit(void)
1629 {
1630 	int i;
1631 
1632 	remove_proc_entry("rose", init_net.proc_net);
1633 	remove_proc_entry("rose_neigh", init_net.proc_net);
1634 	remove_proc_entry("rose_nodes", init_net.proc_net);
1635 	remove_proc_entry("rose_routes", init_net.proc_net);
1636 	rose_loopback_clear();
1637 
1638 	rose_rt_free();
1639 
1640 	ax25_protocol_release(AX25_P_ROSE);
1641 	ax25_linkfail_release(&rose_linkfail_notifier);
1642 
1643 	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1644 		ax25_listen_release(&rose_callsign, NULL);
1645 
1646 #ifdef CONFIG_SYSCTL
1647 	rose_unregister_sysctl();
1648 #endif
1649 	unregister_netdevice_notifier(&rose_dev_notifier);
1650 
1651 	sock_unregister(PF_ROSE);
1652 
1653 	for (i = 0; i < rose_ndevs; i++) {
1654 		struct net_device *dev = dev_rose[i];
1655 
1656 		if (dev) {
1657 			unregister_netdev(dev);
1658 			free_netdev(dev);
1659 		}
1660 	}
1661 
1662 	kfree(dev_rose);
1663 	proto_unregister(&rose_proto);
1664 }
1665 
1666 module_exit(rose_exit);
1667