// SPDX-License-Identifier: GPL-2.0-only
/*
 *  net/dccp/proto.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/dccp.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/checksum.h>

#include <net/inet_sock.h>
#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>

#include <asm/ioctls.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/poll.h>

#include "ccid.h"
#include "dccp.h"
#include "feat.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;

EXPORT_SYMBOL_GPL(dccp_statistics);

DEFINE_PER_CPU(unsigned int, dccp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(dccp_orphan_count);

struct inet_hashinfo dccp_hashinfo;
EXPORT_SYMBOL_GPL(dccp_hashinfo);

/* the maximum queue length for tx in packets. 0 is no limit */
int sysctl_dccp_tx_qlen __read_mostly = 5;
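
/*
 * Illustrative only (assuming the usual DCCP sysctl layout): the default
 * above can be tuned at runtime, e.g.
 *
 *	sysctl -w net.dccp.default.tx_qlen=20
 *
 * A per-socket override is available via DCCP_SOCKOPT_QPOLICY_TXQLEN below.
 */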

#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_state_name(const int state)
{
	static const char *const dccp_state_names[] = {
	[DCCP_OPEN]		= "OPEN",
	[DCCP_REQUESTING]	= "REQUESTING",
	[DCCP_PARTOPEN]		= "PARTOPEN",
	[DCCP_LISTEN]		= "LISTEN",
	[DCCP_RESPOND]		= "RESPOND",
	[DCCP_CLOSING]		= "CLOSING",
	[DCCP_ACTIVE_CLOSEREQ]	= "CLOSEREQ",
	[DCCP_PASSIVE_CLOSE]	= "PASSIVE_CLOSE",
	[DCCP_PASSIVE_CLOSEREQ]	= "PASSIVE_CLOSEREQ",
	[DCCP_TIME_WAIT]	= "TIME_WAIT",
	[DCCP_CLOSED]		= "CLOSED",
	};

	if (state >= DCCP_MAX_STATES)
		return "INVALID STATE!";
	else
		return dccp_state_names[state];
}
#endif

void dccp_set_state(struct sock *sk, const int state)
{
	const int oldstate = sk->sk_state;

	dccp_pr_debug("%s(%p)  %s  -->  %s\n", dccp_role(sk), sk,
		      dccp_state_name(oldstate), dccp_state_name(state));
	WARN_ON(state == oldstate);

	switch (state) {
	case DCCP_OPEN:
		if (oldstate != DCCP_OPEN)
			DCCP_INC_STATS(DCCP_MIB_CURRESTAB);
		/* Client retransmits all Confirm options until entering OPEN */
		if (oldstate == DCCP_PARTOPEN)
			dccp_feat_list_purge(&dccp_sk(sk)->dccps_featneg);
		break;

	case DCCP_CLOSED:
		if (oldstate == DCCP_OPEN || oldstate == DCCP_ACTIVE_CLOSEREQ ||
		    oldstate == DCCP_CLOSING)
			DCCP_INC_STATS(DCCP_MIB_ESTABRESETS);

		sk->sk_prot->unhash(sk);
		if (inet_csk(sk)->icsk_bind_hash != NULL &&
		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
			inet_put_port(sk);
		fallthrough;
	default:
		if (oldstate == DCCP_OPEN)
			DCCP_DEC_STATS(DCCP_MIB_CURRESTAB);
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	inet_sk_set_state(sk, state);
}

EXPORT_SYMBOL_GPL(dccp_set_state);

static void dccp_finish_passive_close(struct sock *sk)
{
	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
		/* Node (client or server) has received Close packet. */
		dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED);
		dccp_set_state(sk, DCCP_CLOSED);
		break;
	case DCCP_PASSIVE_CLOSEREQ:
		/*
		 * Client received CloseReq. We set the `active' flag so that
		 * dccp_send_close() retransmits the Close as per RFC 4340, 8.3.
		 */
		dccp_send_close(sk, 1);
		dccp_set_state(sk, DCCP_CLOSING);
	}
}

void dccp_done(struct sock *sk)
{
	dccp_set_state(sk, DCCP_CLOSED);
	dccp_clear_xmit_timers(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}

EXPORT_SYMBOL_GPL(dccp_done);

const char *dccp_packet_name(const int type)
{
	static const char *const dccp_packet_names[] = {
		[DCCP_PKT_REQUEST]  = "REQUEST",
		[DCCP_PKT_RESPONSE] = "RESPONSE",
		[DCCP_PKT_DATA]	    = "DATA",
		[DCCP_PKT_ACK]	    = "ACK",
		[DCCP_PKT_DATAACK]  = "DATAACK",
		[DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
		[DCCP_PKT_CLOSE]    = "CLOSE",
		[DCCP_PKT_RESET]    = "RESET",
		[DCCP_PKT_SYNC]	    = "SYNC",
		[DCCP_PKT_SYNCACK]  = "SYNCACK",
	};

	if (type >= DCCP_NR_PKT_TYPES)
		return "INVALID";
	else
		return dccp_packet_names[type];
}

EXPORT_SYMBOL_GPL(dccp_packet_name);

void dccp_destruct_common(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
}
EXPORT_SYMBOL_GPL(dccp_destruct_common);

static void dccp_sk_destruct(struct sock *sk)
{
	dccp_destruct_common(sk);
	inet_sock_destruct(sk);
}

int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_rto		= DCCP_TIMEOUT_INIT;
	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
	sk->sk_state		= DCCP_CLOSED;
	sk->sk_write_space	= dccp_write_space;
	sk->sk_destruct		= dccp_sk_destruct;
	icsk->icsk_sync_mss	= dccp_sync_mss;
	dp->dccps_mss_cache	= 536;
	dp->dccps_rate_last	= jiffies;
	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;

	dccp_init_xmit_timers(sk);

	INIT_LIST_HEAD(&dp->dccps_featneg);
	/* control socket doesn't need feat nego */
	if (likely(ctl_sock_initialized))
		return dccp_feat_init(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_init_sock);

void dccp_destroy_sock(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	/* Clean up a referenced DCCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash != NULL)
		inet_put_port(sk);

	kfree(dp->dccps_service_list);
	dp->dccps_service_list = NULL;

	if (dp->dccps_hc_rx_ackvec != NULL) {
		dccp_ackvec_free(dp->dccps_hc_rx_ackvec);
		dp->dccps_hc_rx_ackvec = NULL;
	}
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	/* clean up feature negotiation state */
	dccp_feat_list_purge(&dp->dccps_featneg);
}

EXPORT_SYMBOL_GPL(dccp_destroy_sock);

static inline int dccp_listen_start(struct sock *sk, int backlog)
{
	struct dccp_sock *dp = dccp_sk(sk);

	dp->dccps_role = DCCP_ROLE_LISTEN;
	/* do not start to listen if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dp))
		return -EPROTO;
	return inet_csk_listen_start(sk, backlog);
}

static inline int dccp_need_reset(int state)
{
	return state != DCCP_CLOSED && state != DCCP_LISTEN &&
	       state != DCCP_REQUESTING;
}

int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
	dp->dccps_hc_rx_ccid = NULL;

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_write_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->inet_dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_disconnect);

/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;

	sock_poll_wait(file, sock, wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = EPOLLERR;

	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	}
	return mask;
}

EXPORT_SYMBOL_GPL(dccp_poll);
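
/*
 * Illustrative only (userspace sketch, not part of this file): a caller
 * multiplexes a DCCP socket like any other datagram socket, e.g.
 *
 *	struct pollfd pfd = { .fd = dccp_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		recv(dccp_fd, buf, sizeof(buf), 0);
 *
 * The EPOLL* bits computed above are reported through that interface.
 */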

int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	int rc = -ENOTCONN;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN)
		goto out;

	switch (cmd) {
	case SIOCOUTQ: {
		int amount = sk_wmem_alloc_get(sk);
		/* Using sk_wmem_alloc here because sk_wmem_queued is not used
		 * by DCCP and is always 0, similarly to UDP.
		 */

		rc = put_user(amount, (int __user *)arg);
	}
		break;
	case SIOCINQ: {
		struct sk_buff *skb;
		unsigned long amount = 0;

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb != NULL) {
			/*
			 * We will only return the amount of this packet since
			 * that is all that will be read.
			 */
			amount = skb->len;
		}
		rc = put_user(amount, (int __user *)arg);
	}
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}
out:
	release_sock(sk);
	return rc;
}

EXPORT_SYMBOL_GPL(dccp_ioctl);

static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
				   sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_service_list *sl = NULL;

	if (service == DCCP_SERVICE_INVALID_VALUE ||
	    optlen > DCCP_SERVICE_LIST_MAX_LEN * sizeof(u32))
		return -EINVAL;

	if (optlen > sizeof(service)) {
		sl = kmalloc(optlen, GFP_KERNEL);
		if (sl == NULL)
			return -ENOMEM;

		sl->dccpsl_nr = optlen / sizeof(u32) - 1;
		if (copy_from_sockptr_offset(sl->dccpsl_list, optval,
				sizeof(service), optlen - sizeof(service)) ||
		    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
			kfree(sl);
			return -EFAULT;
		}
	}

	lock_sock(sk);
	dp->dccps_service = service;

	kfree(dp->dccps_service_list);

	dp->dccps_service_list = sl;
	release_sock(sk);
	return 0;
}

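/*
 * Illustrative only (userspace sketch): DCCP_SOCKOPT_SERVICE takes one or
 * more 32-bit service codes in network byte order; the first entry becomes
 * dccps_service above and any further entries form the service list, e.g.
 *
 *	__be32 codes[2] = { htonl(42), htonl(43) };
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE, codes, sizeof(codes));
 */
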
static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
{
	u8 *list, len;
	int i, rc;

	if (cscov < 0 || cscov > 15)
		return -EINVAL;
	/*
	 * Populate a list of permissible values, in the range cscov...15. This
	 * is necessary since feature negotiation of single values only works if
	 * both sides incidentally choose the same value. Since the list starts
	 * lowest-value first, negotiation will pick the smallest shared value.
	 */
	if (cscov == 0)
		return 0;
	len = 16 - cscov;

	list = kmalloc(len, GFP_KERNEL);
	if (list == NULL)
		return -ENOBUFS;

	for (i = 0; i < len; i++)
		list[i] = cscov++;

	rc = dccp_feat_register_sp(sk, DCCPF_MIN_CSUM_COVER, rx, list, len);

	if (rc == 0) {
		if (rx)
			dccp_sk(sk)->dccps_pcrlen = cscov;
		else
			dccp_sk(sk)->dccps_pcslen = cscov;
	}
	kfree(list);
	return rc;
}

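/*
 * Worked example for the helper above: cscov = 12 gives len = 4 and the
 * preference list {12, 13, 14, 15}, so negotiation can settle on any
 * coverage value from 12 up to the maximum of 15.
 */
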
static int dccp_setsockopt_ccid(struct sock *sk, int type,
				sockptr_t optval, unsigned int optlen)
{
	u8 *val;
	int rc = 0;

	if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
		return -EINVAL;

	val = memdup_sockptr(optval, optlen);
	if (IS_ERR(val))
		return PTR_ERR(val);

	lock_sock(sk);
	if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 1, val, optlen);

	if (!rc && (type == DCCP_SOCKOPT_RX_CCID || type == DCCP_SOCKOPT_CCID))
		rc = dccp_feat_register_sp(sk, DCCPF_CCID, 0, val, optlen);
	release_sock(sk);

	kfree(val);
	return rc;
}

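/*
 * Illustrative only (userspace sketch): the CCID options take a list of
 * one or more CCID numbers in order of preference, e.g. preferring
 * CCID-3 (TFRC) over CCID-2 for both half-connections:
 *
 *	__u8 ccids[2] = { 3, 2 };
 *
 *	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, ccids, sizeof(ccids));
 */
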
static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
		sockptr_t optval, unsigned int optlen)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int val, err = 0;

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CHANGE_L:
	case DCCP_SOCKOPT_CHANGE_R:
		DCCP_WARN("sockopt(CHANGE_L/R) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_CCID:
	case DCCP_SOCKOPT_RX_CCID:
	case DCCP_SOCKOPT_TX_CCID:
		return dccp_setsockopt_ccid(sk, optname, optval, optlen);
	}

	if (optlen < (int)sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	if (optname == DCCP_SOCKOPT_SERVICE)
		return dccp_setsockopt_service(sk, val, optval, optlen);

	lock_sock(sk);
	switch (optname) {
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		if (dp->dccps_role != DCCP_ROLE_SERVER)
			err = -EOPNOTSUPP;
		else
			dp->dccps_server_timewait = (val != 0);
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, false);
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		err = dccp_setsockopt_cscov(sk, val, true);
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		if (sk->sk_state != DCCP_CLOSED)
			err = -EISCONN;
		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
			err = -EINVAL;
		else
			dp->dccps_qpolicy = val;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		if (val < 0)
			err = -EINVAL;
		else
			dp->dccps_tx_qlen = val;
		break;
	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return err;
}

int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		    unsigned int optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_setsockopt);

static int dccp_getsockopt_service(struct sock *sk, int len,
				   __be32 __user *optval,
				   int __user *optlen)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const struct dccp_service_list *sl;
	int err = -ENOENT, slen = 0, total_len = sizeof(u32);

	lock_sock(sk);
	if ((sl = dp->dccps_service_list) != NULL) {
		slen = sl->dccpsl_nr * sizeof(u32);
		total_len += slen;
	}

	err = -EINVAL;
	if (total_len > len)
		goto out;

	err = 0;
	if (put_user(total_len, optlen) ||
	    put_user(dp->dccps_service, optval) ||
	    (sl != NULL && copy_to_user(optval + 1, sl->dccpsl_list, slen)))
		err = -EFAULT;
out:
	release_sock(sk);
	return err;
}

static int do_dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct dccp_sock *dp;
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < (int)sizeof(int))
		return -EINVAL;

	dp = dccp_sk(sk);

	switch (optname) {
	case DCCP_SOCKOPT_PACKET_SIZE:
		DCCP_WARN("sockopt(PACKET_SIZE) is deprecated: fix your app\n");
		return 0;
	case DCCP_SOCKOPT_SERVICE:
		return dccp_getsockopt_service(sk, len,
					       (__be32 __user *)optval, optlen);
	case DCCP_SOCKOPT_GET_CUR_MPS:
		val = dp->dccps_mss_cache;
		break;
	case DCCP_SOCKOPT_AVAILABLE_CCIDS:
		return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen);
	case DCCP_SOCKOPT_TX_CCID:
		val = ccid_get_current_tx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_RX_CCID:
		val = ccid_get_current_rx_ccid(dp);
		if (val < 0)
			return -ENOPROTOOPT;
		break;
	case DCCP_SOCKOPT_SERVER_TIMEWAIT:
		val = dp->dccps_server_timewait;
		break;
	case DCCP_SOCKOPT_SEND_CSCOV:
		val = dp->dccps_pcslen;
		break;
	case DCCP_SOCKOPT_RECV_CSCOV:
		val = dp->dccps_pcrlen;
		break;
	case DCCP_SOCKOPT_QPOLICY_ID:
		val = dp->dccps_qpolicy;
		break;
	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
		val = dp->dccps_tx_qlen;
		break;
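	/*
	 * Option numbers 128..191 are reserved for CCID-specific socket
	 * options on the RX half-connection, 192..255 for those on the TX
	 * half-connection; they are forwarded to the respective CCID below.
	 */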
	case 128 ... 191:
		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	case 192 ... 255:
		return ccid_hc_tx_getsockopt(dp->dccps_hc_tx_ccid, sk, optname,
					     len, (u32 __user *)optval, optlen);
	default:
		return -ENOPROTOOPT;
	}

	len = sizeof(val);
	if (put_user(len, optlen) || copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}

int dccp_getsockopt(struct sock *sk, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	if (level != SOL_DCCP)
		return inet_csk(sk)->icsk_af_ops->getsockopt(sk, level,
							     optname, optval,
							     optlen);
	return do_dccp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL_GPL(dccp_getsockopt);

static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
{
	struct cmsghdr *cmsg;

	/*
	 * Assign an (opaque) qpolicy priority value to skb->priority.
	 *
	 * We are overloading this skb field for use with the qpolicy subsystem.
	 * The skb->priority is normally used for the SO_PRIORITY option, which
	 * is initialised from sk_priority. Since the assignment of sk_priority
	 * to skb->priority happens later (on layer 3), we overload this field
	 * for use with queueing priorities as long as the skb is on layer 4.
	 * The default priority value (if nothing is set) is 0.
	 */
	skb->priority = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_DCCP)
			continue;

		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
			return -EINVAL;

		switch (cmsg->cmsg_type) {
		case DCCP_SCM_PRIORITY:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
				return -EINVAL;
			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}

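/*
 * Illustrative only (userspace sketch, data iovec omitted): a sender
 * attaches the qpolicy priority parsed above as a DCCP_SCM_PRIORITY
 * control message, e.g.
 *
 *	char cbuf[CMSG_SPACE(sizeof(__u32))];
 *	struct msghdr mh = { .msg_control = cbuf,
 *			     .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *	cm->cmsg_level = SOL_DCCP;
 *	cm->cmsg_type  = DCCP_SCM_PRIORITY;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(__u32));
 *	*(__u32 *)CMSG_DATA(cm) = prio;
 *	sendmsg(fd, &mh, 0);
 */
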
int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	const int flags = msg->msg_flags;
	const int noblock = flags & MSG_DONTWAIT;
	struct sk_buff *skb;
	int rc, size;
	long timeo;

	trace_dccp_probe(sk, len);

	if (len > dp->dccps_mss_cache)
		return -EMSGSIZE;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, noblock);

	/*
	 * We have to use sk_stream_wait_connect here to set sk_write_pending,
	 * so that the trick in dccp_rcv_request_sent_state_process works.
	 */
	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
		if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_release;

	size = sk->sk_prot->max_header + len;
	release_sock(sk);
	skb = sock_alloc_send_skb(sk, size, noblock, &rc);
	lock_sock(sk);
	if (skb == NULL)
		goto out_release;

	if (dccp_qpolicy_full(sk)) {
		rc = -EAGAIN;
		goto out_discard;
	}

	if (sk->sk_state == DCCP_CLOSED) {
		rc = -ENOTCONN;
		goto out_discard;
	}

	skb_reserve(skb, sk->sk_prot->max_header);
	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc != 0)
		goto out_discard;

	rc = dccp_msghdr_parse(msg, skb);
	if (rc != 0)
		goto out_discard;

	dccp_qpolicy_push(sk, skb);
	/*
	 * The xmit_timer is set if the TX CCID is rate-based and will expire
	 * when congestion control permits to release further packets into the
	 * network. Window-based CCIDs do not use this timer.
	 */
	if (!timer_pending(&dp->dccps_xmit_timer))
		dccp_write_xmit(sk);
out_release:
	release_sock(sk);
	return rc ? : len;
out_discard:
	kfree_skb(skb);
	goto out_release;
}

EXPORT_SYMBOL_GPL(dccp_sendmsg);

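/*
 * Note on the -EMSGSIZE check above: DCCP preserves datagram boundaries,
 * so a message larger than the current maximum packet size (readable via
 * DCCP_SOCKOPT_GET_CUR_MPS) is rejected rather than fragmented; the
 * application must size its writes accordingly.
 */
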
int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		 int flags, int *addr_len)
{
	const struct dccp_hdr *dh;
	long timeo;

	lock_sock(sk);

	if (sk->sk_state == DCCP_LISTEN) {
		len = -ENOTCONN;
		goto out;
	}

	timeo = sock_rcvtimeo(sk, nonblock);

	do {
		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

		if (skb == NULL)
			goto verify_sock_status;

		dh = dccp_hdr(skb);

		switch (dh->dccph_type) {
		case DCCP_PKT_DATA:
		case DCCP_PKT_DATAACK:
			goto found_ok_skb;

		case DCCP_PKT_CLOSE:
		case DCCP_PKT_CLOSEREQ:
			if (!(flags & MSG_PEEK))
				dccp_finish_passive_close(sk);
			fallthrough;
		case DCCP_PKT_RESET:
			dccp_pr_debug("found fin (%s) ok!\n",
				      dccp_packet_name(dh->dccph_type));
			len = 0;
			goto found_fin_ok;
		default:
			dccp_pr_debug("packet_type=%s\n",
				      dccp_packet_name(dh->dccph_type));
			sk_eat_skb(sk, skb);
		}
verify_sock_status:
		if (sock_flag(sk, SOCK_DONE)) {
			len = 0;
			break;
		}

		if (sk->sk_err) {
			len = sock_error(sk);
			break;
		}

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			len = 0;
			break;
		}

		if (sk->sk_state == DCCP_CLOSED) {
			if (!sock_flag(sk, SOCK_DONE)) {
				/* This occurs when the user tries to read
				 * from a socket that was never connected.
				 */
				len = -ENOTCONN;
				break;
			}
			len = 0;
			break;
		}

		if (!timeo) {
			len = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			len = sock_intr_errno(timeo);
			break;
		}

		sk_wait_data(sk, &timeo, NULL);
		continue;
	found_ok_skb:
		if (len > skb->len)
			len = skb->len;
		else if (len < skb->len)
			msg->msg_flags |= MSG_TRUNC;

		if (skb_copy_datagram_msg(skb, 0, msg, len)) {
			/* Exception. Bailout! */
			len = -EFAULT;
			break;
		}
		if (flags & MSG_TRUNC)
			len = skb->len;
	found_fin_ok:
		if (!(flags & MSG_PEEK))
			sk_eat_skb(sk, skb);
		break;
	} while (1);
out:
	release_sock(sk);
	return len;
}

EXPORT_SYMBOL_GPL(dccp_recvmsg);

int inet_dccp_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
		goto out;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != DCCP_LISTEN) {
		/*
		 * FIXME: here it probably should be sk->sk_prot->listen_start
		 * see tcp_listen_start
		 */
		err = dccp_listen_start(sk, backlog);
		if (err)
			goto out;
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}

EXPORT_SYMBOL_GPL(inet_dccp_listen);

static void dccp_terminate_connection(struct sock *sk)
{
	u8 next_state = DCCP_CLOSED;

	switch (sk->sk_state) {
	case DCCP_PASSIVE_CLOSE:
	case DCCP_PASSIVE_CLOSEREQ:
		dccp_finish_passive_close(sk);
		break;
	case DCCP_PARTOPEN:
		dccp_pr_debug("Stop PARTOPEN timer (%p)\n", sk);
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
		fallthrough;
	case DCCP_OPEN:
		dccp_send_close(sk, 1);

		if (dccp_sk(sk)->dccps_role == DCCP_ROLE_SERVER &&
		    !dccp_sk(sk)->dccps_server_timewait)
			next_state = DCCP_ACTIVE_CLOSEREQ;
		else
			next_state = DCCP_CLOSING;
		fallthrough;
	default:
		dccp_set_state(sk, next_state);
	}
}

void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	/* If the socket has already been reset, kill it. */
	if (sk->sk_state == DCCP_CLOSED)
		goto adjudge_to_death;

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush the write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time);
	 * - normal termination, but the queue could not be flushed within the
	 *   time limit.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	this_cpu_inc(dccp_orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

EXPORT_SYMBOL_GPL(dccp_close);

void dccp_shutdown(struct sock *sk, int how)
{
	dccp_pr_debug("called shutdown(%x)\n", how);
}

EXPORT_SYMBOL_GPL(dccp_shutdown);
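
/*
 * Note: as the body above shows, dccp_shutdown() currently only logs the
 * request; DCCP does not implement TCP-style half-close semantics here.
 */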

static inline int __init dccp_mib_init(void)
{
	dccp_statistics = alloc_percpu(struct dccp_mib);
	if (!dccp_statistics)
		return -ENOMEM;
	return 0;
}

static inline void dccp_mib_exit(void)
{
	free_percpu(dccp_statistics);
}

static int thash_entries;
module_param(thash_entries, int, 0444);
MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");

#ifdef CONFIG_IP_DCCP_DEBUG
bool dccp_debug;
module_param(dccp_debug, bool, 0644);
MODULE_PARM_DESC(dccp_debug, "Enable debug messages");

EXPORT_SYMBOL_GPL(dccp_debug);
#endif

static int __init dccp_init(void)
{
	unsigned long goal;
	unsigned long nr_pages = totalram_pages();
	int ehash_order, bhash_order, i;
	int rc;

	BUILD_BUG_ON(sizeof(struct dccp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));
	inet_hashinfo_init(&dccp_hashinfo);
	rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
	if (rc)
		goto out_fail;
	rc = -ENOBUFS;
	dccp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("dccp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!dccp_hashinfo.bind_bucket_cachep)
		goto out_free_hashinfo2;

	/*
	 * Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (nr_pages >= (128 * 1024))
		goal = nr_pages >> (21 - PAGE_SHIFT);
	else
		goal = nr_pages >> (23 - PAGE_SHIFT);

	if (thash_entries)
		goal = (thash_entries *
			sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
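
	/*
	 * Worked example (illustrative): with 4 GiB of 4 KiB pages,
	 * nr_pages = 1048576 >= 128 * 1024, so goal = nr_pages >> (21 - 12)
	 * = 2048 pages, i.e. one page of ehash per 2 MiB of memory;
	 * ehash_order below then becomes 11 (2^11 pages = 8 MiB).
	 */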
	for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
		;
	do {
		unsigned long hash_size = (1UL << ehash_order) * PAGE_SIZE /
					sizeof(struct inet_ehash_bucket);

		while (hash_size & (hash_size - 1))
			hash_size--;
		dccp_hashinfo.ehash_mask = hash_size - 1;
		dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, ehash_order);
	} while (!dccp_hashinfo.ehash && --ehash_order > 0);

	if (!dccp_hashinfo.ehash) {
		DCCP_CRIT("Failed to allocate DCCP established hash table");
		goto out_free_bind_bucket_cachep;
	}

	for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&dccp_hashinfo))
		goto out_free_dccp_ehash;

	bhash_order = ehash_order;

	do {
		dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
					sizeof(struct inet_bind_hashbucket);
		if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
		    bhash_order > 0)
			continue;
		dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, bhash_order);
	} while (!dccp_hashinfo.bhash && --bhash_order >= 0);

	if (!dccp_hashinfo.bhash) {
		DCCP_CRIT("Failed to allocate DCCP bind hash table");
		goto out_free_dccp_locks;
	}

	for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
		spin_lock_init(&dccp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
	}

	rc = dccp_mib_init();
	if (rc)
		goto out_free_dccp_bhash;

	rc = dccp_ackvec_init();
	if (rc)
		goto out_free_dccp_mib;

	rc = dccp_sysctl_init();
	if (rc)
		goto out_ackvec_exit;

	rc = ccid_initialize_builtins();
	if (rc)
		goto out_sysctl_exit;

	dccp_timestamping_init();

	return 0;

out_sysctl_exit:
	dccp_sysctl_exit();
out_ackvec_exit:
	dccp_ackvec_exit();
out_free_dccp_mib:
	dccp_mib_exit();
out_free_dccp_bhash:
	free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
out_free_dccp_locks:
	inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
	free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
out_free_bind_bucket_cachep:
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
out_free_hashinfo2:
	inet_hashinfo2_free_mod(&dccp_hashinfo);
out_fail:
	dccp_hashinfo.bhash = NULL;
	dccp_hashinfo.ehash = NULL;
	dccp_hashinfo.bind_bucket_cachep = NULL;
	return rc;
}

static void __exit dccp_fini(void)
{
	ccid_cleanup_builtins();
	dccp_mib_exit();
	free_pages((unsigned long)dccp_hashinfo.bhash,
		   get_order(dccp_hashinfo.bhash_size *
			     sizeof(struct inet_bind_hashbucket)));
	free_pages((unsigned long)dccp_hashinfo.ehash,
		   get_order((dccp_hashinfo.ehash_mask + 1) *
			     sizeof(struct inet_ehash_bucket)));
	inet_ehash_locks_free(&dccp_hashinfo);
	kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
	dccp_ackvec_exit();
	dccp_sysctl_exit();
	inet_hashinfo2_free_mod(&dccp_hashinfo);
}

module_init(dccp_init);
module_exit(dccp_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");