/*
 *  net/dccp/timer.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/skbuff.h>
#include <linux/export.h>

#include "dccp.h"

/* sysctl variables governing numbers of retransmission attempts */
int  sysctl_dccp_request_retries	__read_mostly = TCP_SYN_RETRIES;
int  sysctl_dccp_retries1		__read_mostly = TCP_RETR1;
int  sysctl_dccp_retries2		__read_mostly = TCP_RETR2;
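
/*
 * A minimal sketch (illustrative only, not compiled here) of how such
 * knobs are typically exposed through sysctl; the actual DCCP table
 * lives in net/dccp/sysctl.c and may differ in names and handlers:
 *
 *	static struct ctl_table dccp_retry_table[] = {
 *		{
 *			.procname	= "request_retries",
 *			.data		= &sysctl_dccp_request_retries,
 *			.maxlen		= sizeof(int),
 *			.mode		= 0644,
 *			.proc_handler	= proc_dointvec,
 *		},
 *		{ }
 *	};
 */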

static void dccp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
	dccp_done(sk);
	DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
}

/* A write timeout has occurred. Process the after effects. */
static int dccp_write_timeout(struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	int retry_until;

	if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
		if (icsk->icsk_retransmits != 0)
			dst_negative_advice(sk);
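		/* GNU "?:" extension: use the per-socket icsk_syn_retries
		 * override when non-zero, otherwise the sysctl default.
		 */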
		retry_until = icsk->icsk_syn_retries ?
			    : sysctl_dccp_request_retries;
	} else {
		if (icsk->icsk_retransmits >= sysctl_dccp_retries1) {
			/* NOTE: draft-ietf-tcpimpl-pmtud-01.txt requires PMTU
			   black hole detection. :-(

			   This is the place to implement it, but it is not
			   implemented, and I do not want to implement it: it
			   is disgusting and does not work in any case. Let me
			   cite the same draft, which requires us to implement
			   this:

   "The one security concern raised by this memo is that ICMP black holes
   are often caused by over-zealous security administrators who block
   all ICMP messages.  It is vitally important that those who design and
   deploy security systems understand the impact of strict filtering on
   upper-layer protocols.  The safest web site in the world is worthless
   if most TCP implementations cannot transfer data from it.  It would
   be far nicer to have all of the black holes fixed rather than fixing
   all of the TCP implementations."

			   Golden words :-).
		   */

			dst_negative_advice(sk);
		}

		retry_until = sysctl_dccp_retries2;
		/*
		 * FIXME: see tcp_write_timeout and tcp_out_of_resources
		 */
	}

	if (icsk->icsk_retransmits >= retry_until) {
		/* Has it gone just too far? */
		dccp_write_err(sk);
		return 1;
	}
	return 0;
}
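
/*
 * Summary of the two limits above: crossing sysctl_dccp_retries1 merely
 * triggers route re-validation via dst_negative_advice(); only crossing
 * retry_until (sysctl_dccp_retries2, or the request limit while the
 * handshake is still in progress) aborts the connection.
 */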

/*
 *	The DCCP retransmit timer.
 */
static void dccp_retransmit_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	/*
	 * More than 4MSL (8 minutes) has passed; a RESET(aborted) was
	 * sent, there is no need to retransmit, and this sock is dead.
	 */
	if (dccp_write_timeout(sk))
		return;

	/*
	 * We want to know the number of packets retransmitted, not the
	 * total number of retransmissions of clones of original packets.
	 */
	if (icsk->icsk_retransmits == 0)
		DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);

	if (dccp_retransmit_skb(sk) != 0) {
		/*
		 * Retransmission failed because of local congestion,
		 * do not backoff.
		 */
		if (--icsk->icsk_retransmits == 0)
			icsk->icsk_retransmits = 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  min(icsk->icsk_rto,
					      TCP_RESOURCE_PROBE_INTERVAL),
					  DCCP_RTO_MAX);
		return;
	}

	icsk->icsk_backoff++;

	icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto,
				  DCCP_RTO_MAX);
	if (icsk->icsk_retransmits > sysctl_dccp_retries1)
		__sk_dst_reset(sk);
}
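
/*
 * Worked example of the backoff above, assuming DCCP_TIMEOUT_INIT is
 * 3 seconds and DCCP_RTO_MAX caps the RTO at 64 seconds: successive
 * retransmissions fire roughly 3, 6, 12, 24, 48, 64, 64, ... seconds
 * apart, so a sysctl_dccp_retries2 limit of 15 is exhausted after
 * 3 + 6 + 12 + 24 + 48 + 10 * 64 s, i.e. about 12 minutes.
 */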

static void dccp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event = 0;

	bh_lock_sock(sk);
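	/*
	 * If a process context currently owns the socket, do not run the
	 * handler from softirq context; re-arm the timer HZ / 20 jiffies
	 * (50 ms) ahead and try again.
	 */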
	if (sock_owned_by_user(sk)) {
		/* Try again later */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		goto out;
	}

	event = icsk->icsk_pending;
	icsk->icsk_pending = 0;

	switch (event) {
	case ICSK_TIME_RETRANS:
		dccp_retransmit_timer(sk);
		break;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/*
 *	Timer for listening sockets
 */
static void dccp_response_timer(struct sock *sk)
{
	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
				   DCCP_RTO_MAX);
}

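/*
 * Note: DCCP implements no keepalive probes of its own; the inet_csk
 * keepalive timer slot is reused here solely to service listening
 * sockets via dccp_response_timer() above.
 */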
static void dccp_keepalive_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ / 20);
		goto out;
	}

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_response_timer(sk);
		goto out;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */
static void dccp_delack_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct inet_connection_sock *icsk = inet_csk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		icsk->icsk_ack.blocked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       jiffies + TCP_DELACK_MIN);
		goto out;
	}

	if (sk->sk_state == DCCP_CLOSED ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;
	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		goto out;
	}

	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!icsk->icsk_ack.pingpong) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1,
						 icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			icsk->icsk_ack.pingpong = 0;
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		dccp_send_ack(sk);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
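
/*
 * Worked example of the ATO handling above: outside pingpong mode each
 * missed delayed ACK doubles the ACK timeout, capped at the current RTO;
 * in pingpong mode a miss instead drops back out of pingpong and resets
 * the timeout to TCP_ATO_MIN (HZ / 25, i.e. 40 ms, assuming the TCP
 * default).
 */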

/**
 * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
 * See the comments above %ccid_dequeueing_decision for supported modes.
 */
static void dccp_write_xmitlet(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dccp_sk(sk)->dccps_xmit_timer, jiffies + 1);
	else
		dccp_write_xmit(sk);
	bh_unlock_sock(sk);
}

static void dccp_write_xmit_timer(unsigned long data)
{
	dccp_write_xmitlet(data);
	sock_put((struct sock *)data);
}
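
/*
 * The timer variant above must drop the socket reference taken by
 * sk_reset_timer() in dccp_write_xmitlet(); the tasklet entry point
 * holds no reference of its own, hence no sock_put() there.
 */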

void dccp_init_xmit_timers(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	tasklet_init(&dp->dccps_xmitlet, dccp_write_xmitlet, (unsigned long)sk);
	setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer,
							     (unsigned long)sk);
	inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer,
				  &dccp_keepalive_timer);
}
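
/*
 * dccp_init_xmit_timers() is wired up from the socket initialisation
 * path (dccp_init_sock() in net/dccp/proto.c); the icsk helper installs
 * the write, delayed-ACK and keepalive handlers defined in this file.
 */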

static ktime_t dccp_timestamp_seed;
/**
 * dccp_timestamp  -  10s of microseconds time source
 * Returns the number of 10s of microseconds since loading DCCP. This is
 * the native DCCP time difference format (RFC 4340, sec. 13).
 * Please note: this will wrap around roughly every 11.9 hours.
 */
u32 dccp_timestamp(void)
{
	s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed);

	do_div(delta, 10);
	return delta;
}
EXPORT_SYMBOL_GPL(dccp_timestamp);
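
/*
 * Worked example of the wrap-around noted above: a u32 tick counter at
 * 10 us per tick overflows after 2^32 * 10 us ~= 42,950 s ~= 11.93 h.
 *
 * A sketch of a wrap-safe elapsed-time helper (hypothetical; DCCP does
 * not ship this): modulo-2^32 subtraction yields the correct delta as
 * long as the two samples are taken less than ~11.9 hours apart.
 */
static inline u32 dccp_timestamp_delta(u32 start, u32 end)
{
	return end - start;	/* unsigned wrap-around does the right thing */
}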

void __init dccp_timestamping_init(void)
{
	dccp_timestamp_seed = ktime_get_real();
}