// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

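/*
 * This file implements the two "mini socket" paths of TCP: the TIME-WAIT
 * state machine (tcp_timewait_state_process(), tcp_time_wait()) and the
 * promotion of SYN_RECV request socks into full sockets
 * (tcp_check_req(), tcp_create_openreq_child()).
 */
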
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
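
/*
 * A worked example with illustrative numbers: if rcv_nxt = 1000 and
 * rcv_wnd = 500, then s_win = 1000 and e_win = 1500.  A segment with
 * seq = 1200, end_seq = 1300 overlaps the window (after(1300, 1000) and
 * before(1200, 1500) both hold) and is acceptable, while an old segment
 * with seq = end_seq = 900 fails all three tests.  The first test also
 * accepts a segment that starts exactly at the window edge, even when
 * the window is zero.
 */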

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note that we do not put the bucket;
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop the skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) and one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path.  That is not quite correct.  The timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow for the loss of one (or more) segments sent by the peer and
 *   of our ACKs.  This time may be calculated from the RTO.
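 *   (Concretely, tcp_time_wait() below uses (rto << 2) - (rto >> 1),
 *   i.e. 3.5 * RTO, as a lower bound on the timeout; an RTO of 200 ms
 *   would give a floor of 700 ms, while a socket entering true
 *   TIME-WAIT always gets the full TCP_TIMEWAIT_LEN of 60 seconds.)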
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless.  Strictly speaking, that means we must
 * spinlock it.  I do not want to!  Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process(). */

		/* Out of window, send ACK. */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after the half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now the real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = ktime_get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* An out-of-window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check (that it carries a
	   newer sequence number) works only at rates < 40 Mbit/sec.
	   However, if PAWS works, it is reliable and, even more, we
	   may relax the silly seq-space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   the SYN-ACK), we must return the socket to the time-wait state.
	   That is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
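		/* Pick an ISN strictly above anything the previous
		 * incarnation could have used (snd_nxt plus a maximal
		 * unscaled window, plus a little slack), and keep it
		 * nonzero: a zero tcp_tw_isn is treated by the request
		 * path as "no TIME-WAIT ISN supplied".
		 */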
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in that case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	tw = inet_twsk_alloc(sk, tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
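		/* (rto << 2) - (rto >> 1) is 4*RTO - RTO/2, i.e. 3.5*RTO,
		 * used below as a floor for the TIME-WAIT timeout.
		 */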
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= sk->sk_priority;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_txhash = sk->sk_txhash;
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			tcptw->tw_md5_key = NULL;
			if (static_branch_unlikely(&tcp_md5_needed)) {
				struct tcp_md5sig_key *key;

				key = tp->af_specific->md5_lookup(sk, sk);
				if (key) {
					tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
					BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
				}
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* tw_timer is pinned, so we need to make sure BHs are
		 * disabled in the following section, otherwise the timer
		 * handler could run before we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning: this function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only. */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* Limit the window selection if the user enforces a smaller rx buffer. */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

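	/* A BPF sock_ops program may supply the initial receive window;
	 * fall back to the route metric if it does not, and grow
	 * full_space so that a larger requested window actually fits.
	 */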
	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice has been made yet, assign the current
	 * system default congestion control algorithm.
	 */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	struct tcp_sock *oldtp, *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now set up the tcp_sock. */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = treq->txhash;
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
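		/* Without window scaling, the 16-bit window field caps
		 * both the window and the clamp at 65535 (RFC 7323).
		 */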
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->rx_opt.ts_recent = req->ts_recent;
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->undo_marker = treq->snt_isn;
		newtp->retrans_stamp = div_u64(treq->snt_synack,
					       USEC_PER_SEC / TCP_TS_HZ);
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
	if (newtp->af_specific->md5_lookup(sk, newsk))
		newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ACK
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
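	/* Keep only the RST, SYN and ACK flag bits; they are all that
	 * the checks below examine.
	 */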
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws this case (incorrectly! It was fixed in
		 * RFC1122) on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar
		 * to the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

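			/* Exponential backoff of the SYNACK retransmission
			 * timer, capped at TCP_RTO_MAX.
			 */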
			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further on we reproduce the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken, however; the only case in which it does not
	   work is when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   A malicious sender sends identical SYNs (and thus identical
	   sequence numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, and the ACK test passes.
	   So does the sequence test, the SYN is truncated, and thus we
	   consider it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept is set, we silently
	   drop this bare ACK.  Otherwise, we create an established
	   connection.  Both ends (listening sockets) accept the new
	   incoming connection and try to talk to each other. 8-)

	   Note: this case is both harmless and rare.  The possibility is
	   about the same as us discovering intelligent life on another
	   planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 *                  and the incoming segment acknowledges something not yet
	 *                  sent (the segment carries an unacceptable ACK) ...
	 *                  a reset is sent."
	 *
	 * Invalid ACK: a reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate the SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure the ACK flag
	 * is set.  If it is not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop a bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, so create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(sk, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to an outside attack aimed at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	int ret = 0;
	int state = child->sk_state;

	/* Record the NAPI ID of the child. */
	sk_mark_napi_id(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wake up the parent, send SIGIO. */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any further.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);