/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

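/* tcp_death_row batches TIME-WAIT reaping.  A sketch of the mechanism
 * (see inet_twdr_hangman() for the authoritative version): the hangman
 * timer fires once per .period and sweeps one of INET_TWDR_TWKILL_SLOTS
 * slots, so a full pass over every bucket takes TCP_TIMEWAIT_LEN.
 */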
struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
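
/* A worked example of the acceptance rule above: with s_win = 1000 and
 * e_win = 1500, a segment [1100, 1200) overlaps the window and is
 * accepted, while [1600, 1700) lies entirely beyond it and is not.
 * The first test accepts any segment starting exactly at the left edge,
 * even when the advertised window is zero; the last test covers the
 * degenerate case of a zero-length segment (seq == end_seq) sitting
 * exactly on the right edge, which the overlap test alone would reject.
 */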

/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully, when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one
 *   or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering off their
 *   path. That is not quite correct. This timeout is calculated so that
 *   it exceeds the maximal retransmission timeout by enough to tolerate
 *   the loss of one (or more) segments sent by the peer and of our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives a RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Certainly, this is pure paranoia, but if we load TIME-WAIT with
 *   these semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some cleverer way to catch duplicates (e.g. based
 *   on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT
 * ARRIVES from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. Strictly speaking, that means we would have to
 * spinlock it, which I do not want to do! The probability of
 * misbehaviour is ridiculously low and, it seems, we could use some
 * mb() tricks to avoid misreading sequence numbers, states etc. --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
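/* A sketch of how the caller interprets our return value (the exact
 * dispatch lives in the IPv4/IPv6 receive paths, e.g. tcp_v4_rcv()):
 *
 *   TCP_TW_SUCCESS - segment fully consumed, nothing to transmit
 *   TCP_TW_RST     - answer the segment with a reset
 *   TCP_TW_ACK     - answer with a (possibly duplicate) ACK
 *   TCP_TW_SYN     - acceptable new SYN; re-run it against the
 *                    listening socket to open a fresh connection
 */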
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.rcv_tsecr	-= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In-window segment: it can only be a reset or a bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.
	 *
	 * All such segments are ACKed immediately.
	 *
	 * The only exception is a new SYN. We accept it if it is not an
	 * old duplicate and we are in no danger of being killed by
	 * delayed old duplicates. The RFC check (accepting the SYN only
	 * if it carries a newer sequence number) works at rates
	 * < 40 Mbit/sec. However, if PAWS works, it is reliable, and we
	 * may even relax the silly sequence-space cutoff.
	 *
	 * RED-PEN: we violate a main RFC requirement here: if this SYN
	 * turns out to be an old duplicate (i.e. we receive a RST in
	 * reply to our SYN-ACK), we must return the socket to TIME-WAIT
	 * state. Not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
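		/* Pick an ISN safely above anything the old incarnation
		 * could have sent: snd_nxt plus the maximum unscaled window
		 * (65535), plus 2 - presumably because SYN and FIN each
		 * consume one sequence number. Zero is skipped, since a
		 * zero tcp_tw_isn reads as "no TIME-WAIT ISN" downstream.
		 */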
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->tcp_tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
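		/* 3.5 * RTO (4*RTO minus RTO/2): long enough, per the
		 * TIME-WAIT notes above, for the peer to retransmit its FIN
		 * and for one of our ACKs to be lost; used below both as a
		 * floor for the requested timeout and as the whole
		 * TIME-WAIT period when recycling applies.
		 */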
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent	= inet->transparent;
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && !tcp_alloc_md5sig_pool())
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key)
		kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_openreq_init_rwin(struct request_sock *req,
			   struct sock *sk, struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_sock *tp = tcp_sk(sk);
	__u8 rcv_wscale;
	int mss = dst_metric_advmss(dst);

	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
		mss = tp->rx_opt.user_mss;

	/* Set this up on the first call only */
	req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
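	/* (GCC's "a ? : b" extension: use tp->window_clamp when it is
	 * non-zero, otherwise fall back to the cached route metric.)
	 */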

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
		req->window_clamp = tcp_full_space(sk);

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(tcp_full_space(sk),
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rcv_wnd,
		&req->window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		dst_metric(dst, RTAX_INITRWND));
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt_us = 0;
		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);
		newtp->tlp_high_seq = 0;
		newtp->lsndtime = treq->snt_synack;
		newtp->total_retrans = req->num_retrans;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them. -DaveM
		 */
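		/* (TCP_INIT_CWND is 10 segments in kernels of this vintage,
		 * per RFC 6928.)
		 */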
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;

		if (!newicsk->icsk_ca_setsockopt ||
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			tcp_assign_congestion_control(newsk);

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		__skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;
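		/* A quick example of the scaling above: a raw window field
		 * of 1024 with snd_wscale == 7 yields a 131072-byte send
		 * window. Applying the scale factor here is legitimate
		 * because this segment is normally the final ACK of the
		 * handshake rather than a SYN - window fields carried in
		 * SYNs are never scaled (RFC 1323).
		 */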

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		newtp->tsoffset = 0;
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		tcp_ecn_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;
		newtp->syn_data_acked = 0;

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current implementation contains a special check for
 * ACK validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true timestamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
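			/* With the default TCP_TIMEOUT_INIT of one second,
			 * the subtrahend below is 2^num_timeout seconds - a
			 * rough match for the exponential SYN-ACK
			 * retransmission backoff since the options were
			 * first seen (an approximation, as noted above).
			 */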
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! it was fixed in RFC1122)
		 * this case in figures 6 and 8, but the formal
		 * protocol description says NOTHING. To be more exact,
		 * it says that we should send an ACK, because this
		 * segment (at least, if it has no data) is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All of that description
		 * is wrong; we cannot trust it and should rely only on
		 * common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset the timer after retransmitting the SYNACK,
		 * similar to the idea of fast retransmit in recovery.
		 */
		if (!inet_rtx_syn_ack(sk, req))
			req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
					   TCP_RTO_MAX) + jiffies;
		return NULL;
	}


	/* Further reproduces section "SEGMENT ARRIVES"
	 * for state SYN-RECEIVED of RFC793.
	 * It is broken, however: it misbehaves only when SYNs are crossed.
	 *
	 * You would think that SYN crossing is impossible here, since
	 * we should have a SYN_SENT socket (from connect()) on our end,
	 * but this is not true if the crossed SYNs were sent to both
	 * ends by a malicious third party. We must defend against this,
	 * and to do that we first verify the ACK (as per RFC793, page
	 * 36) and reset if it is invalid. Is this a true full defense?
	 * To convince ourselves, let us consider a way in which the ACK
	 * test can still pass in this 'malicious crossed SYNs' case.
	 * Malicious sender sends identical SYNs (and thus identical sequence
	 * numbers) to both A and B:
	 *
	 * A: gets SYN, seq=7
	 * B: gets SYN, seq=7
	 *
	 * By our good fortune, both A and B select the same initial
	 * send sequence number of seven :-)
	 *
	 * A: sends SYN|ACK, seq=7, ack_seq=8
	 * B: sends SYN|ACK, seq=7, ack_seq=8
	 *
	 * So we are now A eating this SYN|ACK, ACK test passes. So
	 * does the sequence test, the SYN is truncated, and thus we
	 * consider it a bare ACK.
	 *
	 * If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop
	 * this bare ACK. Otherwise, we create an established connection.
	 * Both ends (listening sockets) accept the new incoming
	 * connection and try to talk to each other. 8-)
	 *
	 * Note: this case is both harmless and rare. The possibility is
	 * about the same as us discovering intelligent life on another
	 * planet tomorrow.
	 *
	 * But generally, we should (the RFC lies!) accept an ACK on the
	 * SYNACK both here and in tcp_rcv_state_process().
	 * tcp_rcv_state_process() does not, hence we do not either.
	 *
	 * Note that the case is absolutely generic:
	 * we cannot optimize anything here without
	 * violating protocol. All the checks must be made
	 * before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too-early or too-late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		 * at tcp_rsk(req)->rcv_isn + 1.
		 */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}
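	/* (With TCP_DEFER_ACCEPT, the bare third-step ACK above is dropped
	 * and only acked is recorded; the request is promoted to a full
	 * socket once a segment carrying data arrives, with the SYN-ACK
	 * rexmit path keeping the request alive for up to
	 * rskq_defer_accept retransmissions - a sketch of the intent,
	 * see the listener timer code for the authoritative behaviour.)
	 */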

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, expect trouble.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary, to
		 * avoid becoming vulnerable to outside attacks aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any further.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);