/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.hashinfo	= &tcp_hashinfo,
};
EXPORT_SYMBOL_GPL(tcp_death_row);

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
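
/*
 * Editor's note, not in the original source: a worked example of how
 * tcp_in_window() classifies segments.  Assume the receiver expects
 * rcv_nxt = 1000 with a 500-byte window, i.e. [s_win, e_win) = [1000, 1500):
 *
 *	tcp_in_window(1000, 1000, 1000, 1500) -> true   (bare ACK at the left edge)
 *	tcp_in_window( 900, 1200, 1000, 1500) -> true   (segment overlaps the window)
 *	tcp_in_window( 800,  950, 1000, 1500) -> false  (entirely old data)
 *	tcp_in_window(1500, 1500, 1000, 1500) -> true   (zero-length probe at the right edge)
 *
 * after()/before() compare modulo 2^32, so the same logic holds across
 * sequence-number wraparound.
 */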

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
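
/*
 * Editor's note (an assumption, not stated in this file): the out-of-window
 * ACK rate limit applied above is governed by the tcp_invalid_ratelimit
 * sysctl, which bounds how often duplicate ACKs are sent in response to
 * invalid segments, e.g.:
 *
 *	# allow at most one such ACK per 500 ms (the default)
 *	sysctl -w net.ipv4.tcp_invalid_ratelimit=500
 */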

/*
 * * The main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, the tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout by enough to allow for the loss of one
 *   (or more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to the RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_reschedule(tw, tw->tw_timeout);
		else
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
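
	/*
	 * Editor's note, not in the original source: in the FIN_WAIT2 branch
	 * above, a pure FIN satisfies end_seq == rcv_nxt + 1 because the FIN
	 * flag consumes exactly one sequence number.  E.g. with rcv_nxt = 1000,
	 * a segment with seq = 1000, no data and FIN set has end_seq = 1001 and
	 * moves the socket to true TIME-WAIT; a FIN carrying unread data would
	 * have end_seq > 1001 and is answered with a reset instead.
	 */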
164
165 /*
166 * Now real TIME-WAIT state.
167 *
168 * RFC 1122:
169 * "When a connection is [...] on TIME-WAIT state [...]
170 * [a TCP] MAY accept a new SYN from the remote TCP to
171 * reopen the connection directly, if it:
172 *
173 * (1) assigns its initial sequence number for the new
174 * connection to be larger than the largest sequence
175 * number it used on the previous connection incarnation,
176 * and
177 *
178 * (2) returns to TIME-WAIT state if the SYN turns out
179 * to be an old duplicate".
180 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}
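
	/*
	 * Editor's note (an assumption, not stated in this file): the RFC 1337
	 * behaviour above is controlled by the tcp_rfc1337 sysctl.  With the
	 * default of 0 an in-window RST kills the TIME-WAIT bucket; setting
	 *
	 *	sysctl -w net.ipv4.tcp_rfc1337=1
	 *
	 * keeps the bucket alive for the full timeout, protecting against
	 * TIME-WAIT assassination by old duplicate RSTs.
	 */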

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it, if it is
	 * not an old duplicate and we are not in danger of being killed
	 * by delayed old duplicates. The RFC check, that the SYN carry a
	 * newer sequence number, works only at rates < 40 Mbit/sec.
	 * However, if PAWS works, it is reliable, and moreover,
	 * we can even relax the silly seq space cutoff.
	 *
	 * RED-PEN: we violate the main RFC requirement: if this SYN turns out
	 * to be an old duplicate (i.e. we receive RST in reply to SYN-ACK),
	 * we must return the socket to time-wait state. It is not good,
	 * but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}
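
	/*
	 * Editor's note, not in the original source: the ISN chosen above is
	 * tw_snd_nxt + 65535 + 2, i.e. safely beyond anything the previous
	 * incarnation could still have in flight within a maximal unscaled
	 * 64 KB window.  E.g. with tw_snd_nxt = 1000 the reopened connection
	 * starts at 66537.  The isn++ for the value 0 is, we assume, because
	 * a tcp_tw_isn of zero is treated by the caller as "no TIME-WAIT
	 * reopen".
	 */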

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
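
/*
 * Editor's sketch, not in the original source: roughly how a caller such as
 * the IPv4 receive path is expected to dispatch on the status returned by
 * tcp_timewait_state_process().  The per-case actions are assumptions; see
 * tcp_v4_rcv() for the real code.
 *
 *	switch (tcp_timewait_state_process(tw, skb, th)) {
 *	case TCP_TW_SYN:	-- try to reopen via a listening socket
 *	case TCP_TW_ACK:	-- send a (rate-limited) ACK from the bucket
 *	case TCP_TW_RST:	-- answer with a reset
 *	case TCP_TW_SUCCESS:	-- segment consumed, nothing to send
 *	}
 */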

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_timewait_sock *tw;
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	tw = inet_twsk_alloc(sk, &tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
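		/*
		 * Editor's note, not in the original source: the expression
		 * above is 4*RTO - RTO/2 = 3.5 * icsk_rto, computed with
		 * shifts.  E.g. an RTO of 200 ms yields rto = 700 ms, which
		 * is used below as the minimum TIME-WAIT timeout and as the
		 * whole timeout when recycling is enabled.
		 */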
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset = tp->tsoffset;
		tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;

			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		/* tw_timer is pinned, so we need to make sure BH are disabled
		 * in following section, otherwise timer handler could run before
		 * we complete the initialization.
		 */
		local_bh_disable();
		inet_twsk_schedule(tw, timeo);
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
		inet_twsk_put(tw);
		local_bh_enable();
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
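
/*
 * Editor's note, not in the original source: typical call sites (an
 * assumption based on the state names; see tcp_fin() and tcp_close() for
 * the real code) look like
 *
 *	tcp_time_wait(sk, TCP_TIME_WAIT, 0);	-- normal TIME-WAIT entry
 *	tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);	-- "dead" FIN-WAIT-2 with timeout
 *
 * With timeo = 0 the timeout is raised to at least 3.5*RTO above, and to the
 * full TCP_TIMEWAIT_LEN (60 s) when entering real TIME-WAIT without recycling.
 */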

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
	int full_space = tcp_full_space(sk_listener);
	int mss = dst_metric_advmss(dst);
	u32 window_clamp;
	__u8 rcv_wscale;

	if (user_mss && user_mss < mss)
		mss = user_mss;

	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
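
/*
 * Editor's note, not in the original source: a worked example of the MSS
 * clamping above.  If the route advertises an advmss of 1460 but the
 * application set TCP_MAXSEG to 1200 on the listener, user_mss wins and the
 * initial window is selected for a 1200-byte MSS, minus 12 bytes
 * (TCPOLEN_TSTAMP_ALIGNED) when the peer negotiated timestamps.
 */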

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && try_module_get(ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !try_module_get(icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
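
/*
 * Editor's note (an assumption, not stated in this file): the RTAX_CC_ALGO
 * metric consulted above is normally installed per route from userspace,
 * e.g. with iproute2:
 *
 *	ip route replace 10.0.0.0/24 dev eth0 congctl cubic
 *	ip route replace 10.0.1.0/24 dev eth0 congctl lock dctcp
 *
 * where "lock" makes tcp_ca_dst_locked() true, so applications cannot
 * override the choice with TCP_CONGESTION.
 */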

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of the memory writes here, since the tp of
 * the listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->segs_in = 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
		newsk->sk_txhash = treq->txhash;
		newtp->last_oow_ack_time = 0;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		/* There's a bubble in the pipe until at least the first ACK. */
		newtp->app_limited = ~0U;

		tcp_init_xmit_timers(newsk);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->rsk_window_clamp;
		newtp->rcv_ssthresh = req->rsk_rcv_wnd;
		newtp->rcv_wnd = req->rsk_rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;
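
		/*
		 * Editor's note, not in the original source: a worked example
		 * of the window scaling above.  If the peer's final ACK
		 * carries window = 8192 and it negotiated snd_wscale = 7 in
		 * its SYN, snd_wnd becomes 8192 << 7 = 1048576 bytes; without
		 * wscale_ok the shift is 0 and the clamp caps the window at
		 * the unscaled maximum of 65535.
		 */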

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_req = NULL;
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;
		newtp->rack.mstamp.v64 = 0;
		newtp->rack.advanced = 0;

		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store true stamp, but it is not required,
			 * it can be estimated (approximately)
			 * from another data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
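
	/*
	 * Editor's note, not in the original source: the ts_recent_stamp
	 * estimate above assumes ts_recent was learned when the SYN arrived.
	 * With TCP_TIMEOUT_INIT (1 s here) and num_timeout = 2 (two SYN-ACK
	 * retransmissions, doubling each time), the SYN is assumed to have
	 * arrived roughly (1 << 2) = 4 seconds ago.
	 */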

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description
		 * is wrong, we cannot believe it and should
		 * rely only on common sense and implementation
		 * experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
				       TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}
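
	/*
	 * Editor's note, not in the original source: the timer arithmetic
	 * above implements binary exponential backoff for the SYN-ACK
	 * retransmission timer: 1 s, 2 s, 4 s, ... (TCP_TIMEOUT_INIT doubled
	 * per timeout), capped at TCP_RTO_MAX (120 s).
	 */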

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it misbehaves only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
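
	/*
	 * Editor's note, not in the original source: TCP_DEFER_ACCEPT is armed
	 * from userspace on the listening socket, e.g.
	 *
	 *	int secs = 5;
	 *	setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
	 *		   &secs, sizeof(secs));
	 *
	 * so that a bare third-step ACK does not complete the handshake here;
	 * the connection is surfaced to accept() only once data arrives (or
	 * the deferral period, converted to rskq_defer_accept timeouts, ends).
	 */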

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);
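
/*
 * Editor's sketch, not in the original source: roughly how the receive path
 * is expected to hand a freshly created child to tcp_child_process() after
 * tcp_check_req() returns it (names abbreviated; see tcp_v4_rcv() for the
 * real code):
 *
 *	struct sock *nsk = tcp_check_req(sk, skb, req, false);
 *	if (nsk && nsk != sk)
 *		ret = tcp_child_process(sk, nsk, skb);
 *
 * Note the child arrives locked and with a reference held; both are released
 * by the bh_unlock_sock()/sock_put() pair above.
 */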