/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or modify it
 *	under the terms of the GNU General Public License version 2 as
 *	published by the Free Software Foundation.
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

struct percpu_counter dccp_orphan_count;
EXPORT_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif

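/*
 * dccp_set_state  -  Transition the socket to a new state
 *
 * Updates the CURRESTAB/ESTABRESETS MIB counters and, when entering
 * DCCP_CLOSED, unhashes the socket and releases its bound port. The state
 * field itself is written last, so that a closed socket is never visible
 * in the hash tables.
 */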
void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		/* fall through */
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->sk_state = state;
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

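/*
 * dccp_done  -  Finish the lifetime of a connection
 *
 * Moves the socket to DCCP_CLOSED and stops the transmit timers. If a user
 * process still holds the socket it is woken up; an already orphaned socket
 * is destroyed immediately.
 */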
void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

static void dccp_sk_destruct(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
	inet_sock_destruct(sk);
}

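/*
 * dccp_init_sock  -  Initialise the DCCP-specific part of a new socket
 *
 * Sets up timers, the default MSS, role and service code, and starts
 * feature negotiation. The DCCP control socket is created before the
 * protocol is fully up (ctl_sock_initialized == 0) and therefore skips
 * feature negotiation.
 */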
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

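/*
 * dccp_disconnect  -  Abort the connection
 *
 * Implements the equivalent of the RFC 793 ABORT user call: a listening
 * socket is simply stopped, while a connection in progress or established
 * is torn down with a Reset (Code 2, "Aborted"). All queues are purged and
 * the socket returns to DCCP_CLOSED, ready for reuse.
 */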
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

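/*
 * dccp_setsockopt_service  -  Set the service code(s) of a socket
 *
 * Illustrative userspace sketch (not part of this file; the service code
 * value 42 is an arbitrary example). A client sets a single service code
 * before connecting:
 *
 *	__be32 service = htonl(42);
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, &service,
 *		   sizeof(service));
 *
 * Passing an array of __be32 values instead registers a whole service list.
 */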
static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_user(sl->dccpsl_list,
				   optval + sizeof(service),
				   optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

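/*
 * dccp_setsockopt_cscov  -  Register a checksum-coverage preference
 *
 * Illustrative userspace sketch (not part of this file): to request partial
 * checksum coverage on outgoing packets, an application might do
 *
 *	int cscov = 10;
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SEND_CSCOV, &cscov,
 *		   sizeof(cscov));
 *
 * The value 10 is an arbitrary example in the permitted range 0..15.
 */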
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

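/*
 * dccp_setsockopt_ccid  -  Register a preference list of CCIDs
 *
 * Illustrative userspace sketch (not part of this file): selecting CCID-3
 * for both directions before connecting, assuming that CCID is built in:
 *
 *	__u8 ccid = 3;
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, &ccid, sizeof(ccid));
 *
 * An array of CCID numbers may be passed instead, ordered by preference.
 */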
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				char __user *optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_user(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		char __user *optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
#endif

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

#ifdef CONFIG_COMPAT
int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
#endif

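/*
 * dccp_msghdr_parse  -  Extract qpolicy control-message data
 *
 * Illustrative userspace sketch (not part of this file): with the socket's
 * queuing policy set to one that accepts priorities, a per-packet priority
 * can be attached as ancillary data (the value 1 below is arbitrary):
 *
 *	struct msghdr msg = { ... };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *	__u32 prio = 1;
 *
 *	cmsg->cmsg_level = SOL_DCCP;
 *	cmsg->cmsg_type	 = DCCP_SCM_PRIORITY;
 *	cmsg->cmsg_len	 = CMSG_LEN(sizeof(prio));
 *	memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));
 *	sendmsg(fd, &msg, 0);
 */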
static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

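/*
 * dccp_sendmsg  -  Queue one packet for transmission
 *
 * DCCP is datagram-oriented: each write becomes exactly one packet, so a
 * message exceeding the current maximum packet size is rejected with
 * -EMSGSIZE rather than being fragmented. The actual transmission time is
 * decided by the TX CCID.
 */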
int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_release;
	}

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

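/*
 * dccp_recvmsg  -  Receive data from one packet
 *
 * Reads at most one packet per call, again reflecting DCCP's datagram
 * orientation; excess bytes are discarded and reported via MSG_TRUNC.
 * Close, CloseReq and Reset packets end the loop and read as EOF.
 */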
int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		 size_t len, int nonblock, int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			/* fall through */
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when user tries to read
				 * from never connected socket.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	sk->sk_max_ack_backlog = backlog;
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

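/*
 * dccp_terminate_connection  -  Start active connection termination
 *
 * Sends a Close (or, for a server configured to avoid holding TIME_WAIT
 * state, a CloseReq so that the client holds it instead) and advances the
 * socket to the appropriate closing state of the RFC 4340 handshake.
 */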
static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		/* fall through */
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		/* fall through */
	default:
		dccp_set_state(sk, next_state);
	}
}

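/*
 * dccp_close  -  Close a socket from process context
 *
 * Discards any unread data (answering with a Reset, Code "Aborted", if
 * data was thrown away), honours a zero linger time, and otherwise flushes
 * the write queue and starts the termination handshake. The socket is then
 * orphaned and destroyed once it reaches DCCP_CLOSED.
 */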
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);

static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

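/*
 * dccp_init  -  Module initialisation
 *
 * Sizes the established ("ehash") and bind ("bhash") hash tables as a
 * fraction of available memory, in the manner of the TCP hash tables
 * (thash_entries overrides the ehash goal), then brings up the MIB,
 * ack-vector, sysctl and CCID subsystems, unwinding in reverse order on
 * failure.
 */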
static int __init dccp_init(void)
{
	unsigned long goal;
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     FIELD_SIZEOF(struct sk_buff, cb));
	rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	inet_hashinfo_init(&dccp_hashinfo);
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_percpu;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (totalram_pages >= (128 * 1024))
		goal = totalram_pages >> (21 - PAGE_SHIFT);
	else
		goal = totalram_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_percpu:
	percpu_counter_destroy(&dccp_orphan_count);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	percpu_counter_destroy(&dccp_orphan_count);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");