// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

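/* E.g. with TCP_USER_TIMEOUT set to 5000 ms and 4900 ms already elapsed
 * since retrans_stamp, only 100 ms of budget remain, so the next RTO is
 * clamped to min(icsk_rto, msecs_to_jiffies(100)) and the timer fires
 * early enough to honour the user-specified bound.
 */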
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 elapsed, start_ts;
	s32 remaining;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (!icsk->icsk_user_timeout)
		return icsk->icsk_rto;
	elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
	remaining = icsk->icsk_user_timeout - elapsed;
	if (remaining <= 0)
		return 1; /* user timeout has passed; fire ASAP */

	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

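/* Same idea for zero-window probes: e.g. a 30 s user timeout with 29 s
 * already spent probing leaves roughly 1 s of budget, so the next probe
 * fires after min(when, max(remaining, TCP_TIMEOUT_MIN)) rather than the
 * normal backed-off interval.
 */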
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining;
	s32 elapsed;

	if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
		return when;

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */
static void tcp_write_err(struct sock *sk)
{
	sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
	sk->sk_error_report(sk);

	tcp_write_queue_purge(sk);
	tcp_done(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is direct violation of TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We are under strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int shift = 0;

	/* If peer does not open window for long time, or did not transmit
	 * anything for long time, penalize it. */
	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
		shift++;

	/* If some dubious ICMP arrived, penalize even more. */
	if (sk->sk_err_soft)
		shift++;

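	/* Each penalty shift effectively halves the orphan budget:
	 * tcp_check_oom() weighs the orphan count << shift against
	 * sysctl_tcp_max_orphans, so a socket carrying both penalties
	 * counts four times as much against the limit.
	 */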
	if (tcp_check_oom(sk, shift)) {
		/* Catch exceptional cases, when connection requires reset.
		 *      1. Last segment was sent recently. */
		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
		    /*  2. Window is closed. */
		    (!tp->snd_wnd && !tp->packets_out))
			do_reset = true;
		if (do_reset)
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
		return 1;
	}

	if (!check_net(sock_net(sk))) {
		/* Not possible to send reset; just close */
		tcp_done(sk);
		return 1;
	}

	return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

	/* We know from an ICMP that something is wrong. */
	if (sk->sk_err_soft && !alive)
		retries = 0;

	/* However, if socket sent something recently, select some safe
	 * number of retries. 8 corresponds to >100 seconds with minimal
	 * RTO of 200msec. */
	if (retries == 0 && alive)
		retries = 8;
	return retries;
}

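/* E.g. if the current search_low maps to an MSS of 1400, the next probe
 * target is halved to 700, capped by tcp_base_mss (default 1024), floored
 * by tcp_mtu_probe_floor and tcp_min_snd_mss (both default 48), and then
 * converted back to an MTU for the next round of black-hole probing.
 */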
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	const struct net *net = sock_net(sk);
	int mss;

	/* Black hole detection */
	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}

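/* Worked example, with rto_base = TCP_RTO_MIN (200 ms) and TCP_RTO_MAX
 * (120 s): linear_backoff_thresh = ilog2(600) = 9.  A boundary of 3 models
 * (2^4 - 1) * 200 ms = 3 s (the documented tcp_retries1 default), while a
 * boundary of 15 models (2^10 - 1) * 200 ms + 6 * 120 s ~= 924.6 s, the
 * roughly 15 minutes documented for the tcp_retries2 default.
 */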
static unsigned int tcp_model_timeout(struct sock *sk,
				      unsigned int boundary,
				      unsigned int rto_base)
{
	unsigned int linear_backoff_thresh, timeout;

	linear_backoff_thresh = ilog2(TCP_RTO_MAX / rto_base);
	if (boundary <= linear_backoff_thresh)
		timeout = ((2 << boundary) - 1) * rto_base;
	else
		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
			(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
	return jiffies_to_msecs(timeout);
}

/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated and used,
 *             based on TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 * The default "timeout" value this function can calculate and use
 * is equivalent to the timeout of a TCP Connection
 * after "boundary" unsuccessful, exponentially backed-off
 * retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
				  unsigned int boundary,
				  unsigned int timeout)
{
	unsigned int start_ts;

	if (!inet_csk(sk)->icsk_retransmits)
		return false;

	start_ts = tcp_sk(sk)->retrans_stamp;
	if (likely(timeout == 0)) {
		unsigned int rto_base = TCP_RTO_MIN;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			rto_base = tcp_timeout_init(sk);
		timeout = tcp_model_timeout(sk, boundary, rto_base);
	}

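	/* The subtraction is done in u32 and read back as s32 so the test
	 * stays correct across tcp_time_stamp() wraparound; e.g. start_ts =
	 * 0xffffff00 with a current stamp of 0x00000200 still yields a small
	 * positive elapsed time.
	 */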
	return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	bool expired = false, do_reset;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			__dst_negative_advice(sk);
		retry_until = icsk->icsk_syn_retries ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
			/* Black hole detection */
			tcp_mtu_probing(icsk, sk);

			__dst_negative_advice(sk);
		}

#ifdef CONFIG_TCP_NB_URC
		retry_until = tcp_get_retries_limit(sk);
#else
		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
#endif /* CONFIG_TCP_NB_URC */
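
		/* Division of labour: tcp_retries1 (default 3) above only
		 * gates MTU probing and the negative routing advice, while
		 * tcp_retries2 (default 15), the orphan limits or the user
		 * timeout decide below when the connection actually dies.
		 */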
		if (sock_flag(sk, SOCK_DEAD)) {
			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

			retry_until = tcp_orphan_retries(sk, alive);
			do_reset = alive ||
				!retransmits_timed_out(sk, retry_until, 0);

			if (tcp_out_of_resources(sk, do_reset))
				return 1;
		}
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						icsk->icsk_user_timeout);
	tcp_fastopen_active_detect_blackhole(sk, expired);

	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
				  icsk->icsk_retransmits,
				  icsk->icsk_rto, (int)expired);

	if (expired) {
		/* Has it gone just too far? */
		tcp_write_err(sk);
		return 1;
	}

	if (sk_rethink_txhash(sk)) {
		tp->timeout_rehash++;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
	}

	return 0;
}

/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_mem_reclaim_partial(sk);

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		goto out;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
		goto out;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: inflate ATO. */
			icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
		} else {
			/* Delayed ACK missed: leave pingpong mode and
			 * deflate ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato      = TCP_ATO_MIN;
		}
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_send_ack(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
	}

out:
	if (tcp_under_memory_pressure(sk))
		sk_mem_reclaim(sk);
}

/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer. (gets cast to struct sock *)
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sk_buff *skb = tcp_send_head(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !skb) {
		icsk->icsk_probes_out = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	/* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
	 * long as the receiver continues to respond to probes. We support this
	 * by default and reset icsk_probes_out with incoming ACKs. But if the
	 * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
	 * kill the socket when the retry count and the time exceed the
	 * corresponding system limit. We also implement a similar policy when
	 * we use RTO to probe window in tcp_retransmit_timer().
	 */
	if (!icsk->icsk_probes_tstamp)
		icsk->icsk_probes_tstamp = tcp_jiffies32;
	else if (icsk->icsk_user_timeout &&
		 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		 msecs_to_jiffies(icsk->icsk_user_timeout))
		goto abort;

#ifdef CONFIG_TCP_NB_URC
	max_probes = tcp_get_retries_limit(sk);
#else
	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
#endif /* CONFIG_TCP_NB_URC */
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
		if (tcp_out_of_resources(sk, true))
			return;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		/* Only send another probe if we didn't close things up. */
		tcp_send_probe0(sk);
	}
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_retries;

	req->rsk_ops->syn_ack_timeout(req);

	/* add one more retry for fastopen */
	max_retries = icsk->icsk_syn_retries ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);
		return;
	}
	/* Lower cwnd after certain SYNACK timeout like tcp_init_transfer() */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);
	/* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
	 * returned from rtx_syn_ack() to make it more persistent like
	 * regular retransmit because if the child socket has been accepted
	 * it's not good to give up too easily.
	 */
	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	icsk->icsk_retransmits++;
	if (!tp->retrans_stamp)
		tp->retrans_stamp = tcp_time_stamp(tp);
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
			  TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}

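/* Note the units: rcv_delta is computed directly in jiffies, while
 * rtx_delta starts on the ms-based tcp_time_stamp() clock and goes through
 * msecs_to_jiffies(), so both compare consistently against 2 * TCP_RTO_MAX.
 */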
static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const int timeout = TCP_RTO_MAX * 2;
	u32 rcv_delta, rtx_delta;

	rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
			(tp->retrans_stamp ?: tcp_skb_timestamp(skb)));

	return rtx_delta > timeout;
}

/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *req;
	struct sk_buff *skb;

	req = rcu_dereference_protected(tp->fastopen_rsk,
					lockdep_sock_is_held(sk));
	if (req) {
		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
			     sk->sk_state != TCP_FIN_WAIT1);
		tcp_fastopen_synack_timer(sk, req);
		/* Before we receive ACK to our SYN-ACK don't retransmit
		 * anything else (e.g., data or FIN segments).
		 */
		return;
	}

	if (!tp->packets_out)
		return;

	skb = tcp_rtx_queue_head(sk);
	if (WARN_ON_ONCE(!skb))
		return;

	tp->tlp_high_seq = 0;

	if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
	    !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
		/* Receiver dastardly shrinks window. Our retransmits
		 * become zero probes, but we should not timeout this
		 * connection. If the socket is an orphan, time it out,
		 * we cannot allow such beasts to hang infinitely.
		 */
		struct inet_sock *inet = inet_sk(sk);

		if (sk->sk_family == AF_INET) {
			net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &inet->inet_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (sk->sk_family == AF_INET6) {
			net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
					    &sk->sk_v6_daddr,
					    ntohs(inet->inet_dport),
					    inet->inet_num,
					    tp->snd_una, tp->snd_nxt);
		}
#endif
		if (tcp_rtx_probe0_timed_out(sk, skb)) {
			tcp_write_err(sk);
			goto out;
		}
		tcp_enter_loss(sk);
		tcp_retransmit_skb(sk, skb, 1);
		__sk_dst_reset(sk);
		goto out_reset_timer;
	}

	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
	if (tcp_write_timeout(sk))
		goto out;

	if (icsk->icsk_retransmits == 0) {
		int mib_idx = 0;

		if (icsk->icsk_ca_state == TCP_CA_Recovery) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
			else
				mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
		} else if (icsk->icsk_ca_state == TCP_CA_Loss) {
			mib_idx = LINUX_MIB_TCPLOSSFAILURES;
		} else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
			   tp->sacked_out) {
			if (tcp_is_sack(tp))
				mib_idx = LINUX_MIB_TCPSACKFAILURES;
			else
				mib_idx = LINUX_MIB_TCPRENOFAILURES;
		}
		if (mib_idx)
			__NET_INC_STATS(sock_net(sk), mib_idx);
	}

	tcp_enter_loss(sk);

	icsk->icsk_retransmits++;
	if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
		/* Retransmission failed because of local congestion,
		 * let senders fight for local resources conservatively.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  TCP_RESOURCE_PROBE_INTERVAL,
					  TCP_RTO_MAX);
		goto out;
	}

	/* Increase the timeout each time we retransmit.  Note that
	 * we do not increase the rtt estimate.  rto is initialized
	 * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
	 * that doubling rto each time is the least we can get away with.
	 * In KA9Q, Karn uses this for the first few times, and then
	 * goes to quadratic.  netBSD doubles, but only goes up to *64,
	 * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
	 * defined in the protocol as the maximum possible RTT.  I guess
	 * we'll have to use something other than TCP to talk to the
	 * University of Mars.
	 *
	 * PAWS allows us longer timeouts and large windows, so once
	 * implemented ftp to mars will work nicely. We will have to fix
	 * the 120 second clamps though!
	 */
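	/* Concretely: from an initial RTO of TCP_TIMEOUT_INIT (1 s) the
	 * retransmission timer runs 1, 2, 4, 8, ... seconds and saturates
	 * at TCP_RTO_MAX (120 s).  The thin-stream case below instead keeps
	 * a flat, RTT-derived RTO for the first TCP_THIN_LINEAR_RETRIES (6)
	 * attempts before resuming the doubling.
	 */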
	icsk->icsk_backoff++;

out_reset_timer:
	/* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
	 * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
	 * might be increased if the stream oscillates between thin and thick,
	 * thus the old value might already be too high compared to the value
	 * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
	 * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
	 * exponential backoff behaviour, to avoid continuing to hammer
	 * linear-timeout retransmissions into a black hole.
	 */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else {
		/* Use normal (exponential) backoff */
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
		__sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
 * Called by tcp_write_timer()
 */
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		goto out;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
		goto out;
	}

	tcp_mstamp_refresh(tcp_sk(sk));
	event = icsk->icsk_pending;

	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}

out:
	sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
	if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
		return;

	if (val && !sock_flag(sk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
	else if (!val)
		inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);

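/* With the defaults (tcp_keepalive_time = 7200 s, tcp_keepalive_intvl = 75 s,
 * tcp_keepalive_probes = 9) an idle connection is first probed after two
 * hours and, absent any response, reset roughly 7200 + 9 * 75 = 7875 s
 * (about 2 h 11 min) after the last activity.
 */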
static void tcp_keepalive_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed;

	/* Only process if socket is not in use. */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Try again later. */
		inet_csk_reset_keepalive_timer(sk, HZ/20);
		goto out;
	}

	if (sk->sk_state == TCP_LISTEN) {
		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}

	tcp_mstamp_refresh(tp);
	if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
		if (tp->linger2 >= 0) {
			const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

			if (tmo > 0) {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
		tcp_send_active_reset(sk, GFP_ATOMIC);
		goto death;
	}

	if (!sock_flag(sk, SOCK_KEEPOPEN) ||
	    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
		goto out;

	elapsed = keepalive_time_when(tp);

	/* It is alive without keepalive 8) */
	if (tp->packets_out || !tcp_write_queue_empty(sk))
		goto resched;

	elapsed = keepalive_time_elapsed(tp);

	if (elapsed >= keepalive_time_when(tp)) {
		/* If the TCP_USER_TIMEOUT option is enabled, use that
		 * to determine when to timeout instead.
		 */
		if ((icsk->icsk_user_timeout != 0 &&
		    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
		    icsk->icsk_probes_out > 0) ||
		    (icsk->icsk_user_timeout == 0 &&
		    icsk->icsk_probes_out >= keepalive_probes(tp))) {
			tcp_send_active_reset(sk, GFP_ATOMIC);
			tcp_write_err(sk);
			goto out;
		}
		if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
			icsk->icsk_probes_out++;
			elapsed = keepalive_intvl_when(tp);
		} else {
			/* If keepalive was lost due to local congestion,
			 * try harder.
			 */
			elapsed = TCP_RESOURCE_PROBE_INTERVAL;
		}
	} else {
		/* It is tp->rcv_tstamp + keepalive_time_when(tp) */
		elapsed = keepalive_time_when(tp) - elapsed;
	}

	sk_mem_reclaim(sk);

resched:
	inet_csk_reset_keepalive_timer(sk, elapsed);
	goto out;

death:
	tcp_done(sk);

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
	struct sock *sk = (struct sock *)tp;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		if (tp->compressed_ack) {
			/* Since we have to send one ack finally,
			 * subtract one from tp->compressed_ack to keep
			 * LINUX_MIB_TCPACKCOMPRESSED accurate.
			 */
			tp->compressed_ack--;
			tcp_send_ack(sk);
		}
	} else {
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);

	sock_put(sk);

	return HRTIMER_NORESTART;
}

void tcp_init_xmit_timers(struct sock *sk)
{
	inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
				  &tcp_keepalive_timer);
	hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_PINNED_SOFT);
	tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

	hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_PINNED_SOFT);
	tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}