// SPDX-License-Identifier: GPL-2.0-only
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
 * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>

#include <net/tcp.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>

/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
   closely.  They're more complex. --RR */

static const char *const tcp_conntrack_names[] = {
	"NONE",
	"SYN_SENT",
	"SYN_RECV",
	"ESTABLISHED",
	"FIN_WAIT",
	"CLOSE_WAIT",
	"LAST_ACK",
	"TIME_WAIT",
	"CLOSE",
	"SYN_SENT2",
};

enum nf_ct_tcp_action {
	NFCT_TCP_IGNORE,
	NFCT_TCP_INVALID,
	NFCT_TCP_ACCEPT,
};

#define SECS * HZ
#define MINS * 60 SECS
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
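
/* Illustrative note: with the multiplier macros above, a table entry such
 * as "2 MINS" expands to "2 * 60 * HZ", i.e. the timeouts below are
 * expressed in jiffies.
 */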

static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
/* RFC1122 says the R2 limit should be at least 100 seconds.
   Linux uses 15 packets as the limit, which corresponds
   to ~13-30min depending on RTO. */
	[TCP_CONNTRACK_RETRANS]		= 5 MINS,
	[TCP_CONNTRACK_UNACK]		= 5 MINS,
};

#define sNO TCP_CONNTRACK_NONE
#define sSS TCP_CONNTRACK_SYN_SENT
#define sSR TCP_CONNTRACK_SYN_RECV
#define sES TCP_CONNTRACK_ESTABLISHED
#define sFW TCP_CONNTRACK_FIN_WAIT
#define sCW TCP_CONNTRACK_CLOSE_WAIT
#define sLA TCP_CONNTRACK_LAST_ACK
#define sTW TCP_CONNTRACK_TIME_WAIT
#define sCL TCP_CONNTRACK_CLOSE
#define sS2 TCP_CONNTRACK_SYN_SENT2
#define sIV TCP_CONNTRACK_MAX
#define sIG TCP_CONNTRACK_IGNORE

/* What TCP flags are set from RST/SYN/FIN/ACK. */
enum tcp_bit_set {
	TCP_SYN_SET,
	TCP_SYNACK_SET,
	TCP_FIN_SET,
	TCP_ACK_SET,
	TCP_RST_SET,
	TCP_NONE_SET,
};

/*
 * The TCP state transition table needs a few words...
 *
 * We are the man in the middle. All the packets go through us
 * but might get lost in transit to the destination.
 * It is assumed that the destinations can't receive segments
 * we haven't seen.
 *
 * The checked segment is in window, but our windows are *not*
 * equivalent to the ones of the sender/receiver. We always
 * try to guess the state of the current sender.
 *
 * The meanings of the states are:
 *
 * NONE:	initial state
 * SYN_SENT:	SYN-only packet seen
 * SYN_SENT2:	SYN-only packet seen from reply dir, simultaneous open
 * SYN_RECV:	SYN-ACK packet seen
 * ESTABLISHED:	ACK packet seen
 * FIN_WAIT:	FIN packet seen
 * CLOSE_WAIT:	ACK seen (after FIN)
 * LAST_ACK:	FIN seen (after FIN)
 * TIME_WAIT:	last ACK seen
 * CLOSE:	closed connection (RST)
 *
 * Packets marked as IGNORED (sIG):
 *	if they may be either invalid or valid
 *	and the receiver may send back a connection
 *	closing RST or a SYN/ACK.
 *
 * Packets marked as INVALID (sIV):
 *	if we regard them as truly invalid packets
 */
static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
	{
/* ORIGINAL */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
/*
 *	sNO -> sSS	Initialize a new connection
 *	sSS -> sSS	Retransmitted SYN
 *	sS2 -> sS2	Late retransmitted SYN
 *	sSR -> sIG
 *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
 *			are errors. Receiver will reply with RST
 *			and close the connection.
 *			Or we are not in sync and hold a dead connection.
 *	sFW -> sIG
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sSS	Reopened connection (RFC 1122).
 *	sCL -> sSS
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
/*
 *	sNO -> sIV	Too late and no reason to do anything
 *	sSS -> sIV	Client can't send SYN and then SYN/ACK
 *	sS2 -> sSR	SYN/ACK sent to SYN2 in simultaneous open
 *	sSR -> sSR	Late retransmitted SYN/ACK in simultaneous open
 *	sES -> sIV	Invalid SYN/ACK packets sent by the client
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sIV
 *	sCL -> sIV
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sNO -> sIV	Too late and no reason to do anything...
 *	sSS -> sIV	Client might not send FIN in this state:
 *			we enforce waiting for a SYN/ACK reply first.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions, waiting for
 *			the last ACK.
 *			Might be a retransmitted FIN as well...
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
/*
 *	sNO -> sES	Assumed.
 *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
 *	sS2 -> sIV
 *	sSR -> sES	Established state is reached.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	},
	{
/* REPLY */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
/*
 *	sNO -> sIV	Never reached.
 *	sSS -> sS2	Simultaneous open
 *	sS2 -> sS2	Retransmitted simultaneous SYN
 *	sSR -> sIV	Invalid SYN packets sent by the server
 *	sES -> sIV
 *	sFW -> sIV
 *	sCW -> sIV
 *	sLA -> sIV
 *	sTW -> sSS	Reopened connection, but server may have switched role
 *	sCL -> sIV
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
/*
 *	sSS -> sSR	Standard open.
 *	sS2 -> sSR	Simultaneous open
 *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
 *	sES -> sIG	Late retransmitted SYN/ACK?
 *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
 *	sCW -> sIG
 *	sLA -> sIG
 *	sTW -> sIG
 *	sCL -> sIG
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
/*
 *	sSS -> sIV	Server might not send FIN in this state.
 *	sS2 -> sIV
 *	sSR -> sFW	Close started.
 *	sES -> sFW
 *	sFW -> sLA	FIN seen in both directions.
 *	sCW -> sLA
 *	sLA -> sLA	Retransmitted FIN.
 *	sTW -> sTW
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
/*
 *	sSS -> sIG	Might be a half-open connection.
 *	sS2 -> sIG
 *	sSR -> sSR	Might answer late resent SYN.
 *	sES -> sES	:-)
 *	sFW -> sCW	Normal close request answered by ACK.
 *	sCW -> sCW
 *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
 *	sTW -> sTW	Retransmitted last ACK.
 *	sCL -> sCL
 */
/* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
/*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
/*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
	}
};
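
/* Sketch of how the table above is consulted (this mirrors the lookup done
 * in nf_conntrack_tcp_packet() below; it is shown here only to make the
 * indexing explicit):
 *
 *	new_state = tcp_conntracks[dir][get_conntrack_index(th)][old_state];
 *
 * where dir is 0 for ORIGINAL and 1 for REPLY, the middle index is the
 * flag class of the packet and the last index is the current sender state.
 */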

#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
{
	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
}
#endif

static unsigned int get_conntrack_index(const struct tcphdr *tcph)
{
	if (tcph->rst) return TCP_RST_SET;
	else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
	else if (tcph->fin) return TCP_FIN_SET;
	else if (tcph->ack) return TCP_ACK_SET;
	else return TCP_NONE_SET;
}

/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
   in IP Filter' by Guido van Rooij.

   http://www.sane.nl/events/sane2000/papers.html
   http://www.darkart.com/mirrors/www.obfuscation.org/ipf/

   The boundaries and the conditions are changed according to RFC793:
   the packet must intersect the window (i.e. segments may be
   after the right or before the left edge) and thus receivers may ACK
   segments after the right edge of the window.

	td_maxend = max(sack + max(win,1)) seen in reply packets
	td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
	td_maxwin += seq + len - sender.td_maxend
			if seq + len > sender.td_maxend
	td_end    = max(seq + len) seen in sent packets

   I.   Upper bound for valid data:	seq <= sender.td_maxend
   II.  Lower bound for valid data:	seq + len >= sender.td_end - receiver.td_maxwin
   III.	Upper bound for valid (s)ack:   sack <= receiver.td_end
   IV.	Lower bound for valid (s)ack:	sack >= receiver.td_end - MAXACKWINDOW

   where sack is the highest right edge of a sack block found in the packet
   or the ack in the case of a packet without the SACK option.

   The upper bound limit for a valid (s)ack is not ignored -
   we don't have to deal with fragments.
*/
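
/* Worked example with made-up numbers: suppose sender.td_end = 2000,
 * sender.td_maxend = 3000, receiver.td_end = 500 and
 * receiver.td_maxwin = 1000. A segment with seq = 2500 and len = 100
 * passes I (2500 <= 3000) and II (2600 >= 2000 - 1000); an ack of 400
 * passes III (400 <= 500) and IV (400 >= 500 - MAXACKWINDOW, trivially,
 * since MAXACKWINDOW is at least 66000). All comparisons use modular
 * sequence-number arithmetic (before()/after()).
 */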
312 
segment_seq_plus_len(__u32 seq,size_t len,unsigned int dataoff,const struct tcphdr * tcph)313 static inline __u32 segment_seq_plus_len(__u32 seq,
314 					 size_t len,
315 					 unsigned int dataoff,
316 					 const struct tcphdr *tcph)
317 {
318 	/* XXX Should I use payload length field in IP/IPv6 header ?
319 	 * - YK */
320 	return (seq + len - dataoff - tcph->doff*4
321 		+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
322 }
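
/* Example (illustrative): for a pure SYN with seq = 100, a 20-byte TCP
 * header and no payload, len - dataoff - tcph->doff*4 is 0, so the
 * function returns 101: the SYN itself consumes one sequence number,
 * as a FIN does too.
 */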

/* Fixme: what about big packets? */
#define MAXACKWINCONST			66000
#define MAXACKWINDOW(sender)						\
	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
					      : MAXACKWINCONST)
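
/* Note: 66000 is just above the largest unscaled TCP window (65535),
 * presumably chosen as a safe floor for the ACK window when the sender's
 * real maximum window is unknown or smaller.
 */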

/*
 * Simplified tcp_parse_options routine from tcp_input.c
 */
static void tcp_options(const struct sk_buff *skb,
			unsigned int dataoff,
			const struct tcphdr *tcph,
			struct ip_ct_tcp_state *state)
{
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	if (!ptr)
		return;

	state->td_scale = 0;
	state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK_PERM
			    && opsize == TCPOLEN_SACK_PERM)
				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
			else if (opcode == TCPOPT_WINDOW
				 && opsize == TCPOLEN_WINDOW) {
				state->td_scale = *(u_int8_t *)ptr;

				if (state->td_scale > TCP_MAX_WSCALE)
					state->td_scale = TCP_MAX_WSCALE;

				state->flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}

static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
		     const struct tcphdr *tcph, __u32 *sack)
{
	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
	const unsigned char *ptr;
	int length = (tcph->doff*4) - sizeof(struct tcphdr);
	__u32 tmp;

	if (!length)
		return;

	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
				 length, buff);
	if (!ptr)
		return;

	/* Fast path for timestamp-only option */
	if (length == TCPOLEN_TSTAMP_ALIGNED
	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
				       | (TCPOPT_NOP << 16)
				       | (TCPOPT_TIMESTAMP << 8)
				       | TCPOLEN_TIMESTAMP))
		return;

	while (length > 0) {
		int opcode = *ptr++;
		int opsize, i;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
			length--;
			continue;
		default:
			if (length < 2)
				return;
			opsize = *ptr++;
			if (opsize < 2) /* "silly options" */
				return;
			if (opsize > length)
				return;	/* don't parse partial options */

			if (opcode == TCPOPT_SACK
			    && opsize >= (TCPOLEN_SACK_BASE
					  + TCPOLEN_SACK_PERBLOCK)
			    && !((opsize - TCPOLEN_SACK_BASE)
				 % TCPOLEN_SACK_PERBLOCK)) {
				for (i = 0;
				     i < (opsize - TCPOLEN_SACK_BASE);
				     i += TCPOLEN_SACK_PERBLOCK) {
					tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);

					if (after(tmp, *sack))
						*sack = tmp;
				}
				return;
			}
			ptr += opsize - 2;
			length -= opsize;
		}
	}
}
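
/* Each SACK block in the option is a (left edge, right edge) pair of
 * 32-bit sequence numbers. The loop above reads only the right edge
 * (the "+1" in the get_unaligned_be32() argument skips the left edge)
 * and keeps the highest right edge seen in *sack.
 */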

static void tcp_init_sender(struct ip_ct_tcp_state *sender,
			    struct ip_ct_tcp_state *receiver,
			    const struct sk_buff *skb,
			    unsigned int dataoff,
			    const struct tcphdr *tcph,
			    u32 end, u32 win,
			    enum ip_conntrack_dir dir)
{
	/* SYN-ACK in reply to a SYN
	 * or SYN from reply direction in simultaneous open.
	 */
	sender->td_end =
	sender->td_maxend = end;
	sender->td_maxwin = (win == 0 ? 1 : win);

	tcp_options(skb, dataoff, tcph, sender);
	/* RFC 1323:
	 * Both sides must send the Window Scale option
	 * to enable window scaling in either direction.
	 */
	if (dir == IP_CT_DIR_REPLY &&
	    !(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
	      receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
		sender->td_scale = 0;
		receiver->td_scale = 0;
	}
}

__printf(6, 7)
static enum nf_ct_tcp_action nf_tcp_log_invalid(const struct sk_buff *skb,
						const struct nf_conn *ct,
						const struct nf_hook_state *state,
						const struct ip_ct_tcp_state *sender,
						enum nf_ct_tcp_action ret,
						const char *fmt, ...)
{
	const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
	struct va_format vaf;
	va_list args;
	bool be_liberal;

	be_liberal = sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL || tn->tcp_be_liberal;
	if (be_liberal)
		return NFCT_TCP_ACCEPT;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	nf_ct_l4proto_log_invalid(skb, ct, state, "%pV", &vaf);
	va_end(args);

	return ret;
}

static enum nf_ct_tcp_action
tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
	      unsigned int index, const struct sk_buff *skb,
	      unsigned int dataoff, const struct tcphdr *tcph,
	      const struct nf_hook_state *hook_state)
{
	struct ip_ct_tcp *state = &ct->proto.tcp;
	struct ip_ct_tcp_state *sender = &state->seen[dir];
	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
	__u32 seq, ack, sack, end, win, swin;
	bool in_recv_win, seq_ok;
	s32 receiver_offset;
	u16 win_raw;

	/*
	 * Get the required data from the packet.
	 */
	seq = ntohl(tcph->seq);
	ack = sack = ntohl(tcph->ack_seq);
	win_raw = ntohs(tcph->window);
	win = win_raw;
	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);

	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
		tcp_sack(skb, dataoff, tcph, &sack);

	/* Take into account NAT sequence number mangling */
	receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
	ack -= receiver_offset;
	sack -= receiver_offset;

	if (sender->td_maxwin == 0) {
		/*
		 * Initialize sender data.
		 */
		if (tcph->syn) {
			tcp_init_sender(sender, receiver,
					skb, dataoff, tcph,
					end, win, dir);
			if (!tcph->ack)
				/* Simultaneous open */
				return NFCT_TCP_ACCEPT;
		} else {
			/*
			 * We are in the middle of a connection,
			 * its history is lost for us.
			 * Let's try to use the data from the packet.
			 */
			sender->td_end = end;
			swin = win << sender->td_scale;
			sender->td_maxwin = (swin == 0 ? 1 : swin);
			sender->td_maxend = end + sender->td_maxwin;
			if (receiver->td_maxwin == 0) {
				/* We haven't seen traffic in the other
				 * direction yet but we have to tweak window
				 * tracking to pass III and IV until that
				 * happens.
				 */
				receiver->td_end = receiver->td_maxend = sack;
			} else if (sack == receiver->td_end + 1) {
				/* Likely a reply to a keepalive.
				 * Needed for III.
				 */
				receiver->td_end++;
			}
		}
	} else if (tcph->syn &&
		   after(end, sender->td_end) &&
		   (state->state == TCP_CONNTRACK_SYN_SENT ||
		    state->state == TCP_CONNTRACK_SYN_RECV)) {
		/*
		 * RFC 793: "if a TCP is reinitialized ... then it need
		 * not wait at all; it must only be sure to use sequence
		 * numbers larger than those recently used."
		 *
		 * Re-init state for this direction, just like for the first
		 * syn(-ack) reply, it might differ in seq, ack or tcp options.
		 */
		tcp_init_sender(sender, receiver,
				skb, dataoff, tcph,
				end, win, dir);

		if (dir == IP_CT_DIR_REPLY && !tcph->ack)
			return NFCT_TCP_ACCEPT;
	}

	if (!(tcph->ack)) {
		/*
		 * If there is no ACK, just pretend it was set and OK.
		 */
		ack = sack = receiver->td_end;
	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
		    (TCP_FLAG_ACK|TCP_FLAG_RST))
		   && (ack == 0)) {
		/*
		 * Broken TCP stacks, that set ACK in RST packets as well
		 * with zero ack value.
		 */
		ack = sack = receiver->td_end;
	}

	if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
		/*
		 * RST sent answering SYN.
		 */
		seq = end = sender->td_end;

	seq_ok = before(seq, sender->td_maxend + 1);
	if (!seq_ok) {
		u32 overshot = end - sender->td_maxend + 1;
		bool ack_ok;

		ack_ok = after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1);
		in_recv_win = receiver->td_maxwin &&
			      after(end, sender->td_end - receiver->td_maxwin - 1);

		if (in_recv_win &&
		    ack_ok &&
		    overshot <= receiver->td_maxwin &&
		    before(sack, receiver->td_end + 1)) {
			/* Work around TCPs that send more bytes than allowed by
			 * the receive window.
			 *
			 * If the (marked as invalid) packet is allowed to pass by
			 * the ruleset and the peer acks this data, then it's possible
			 * all future packets will trigger the 'ACK is over upper bound' check.
			 *
			 * Thus, if only the sequence check fails, update td_end so
			 * that a possible ACK for this data can update the internal state.
			 */
			sender->td_end = end;
			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

			return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
						  "%u bytes more than expected", overshot);
		}

		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
					  "SEQ is over upper bound %u (over the window of the receiver)",
					  sender->td_maxend + 1);
	}

	if (!before(sack, receiver->td_end + 1))
		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_INVALID,
					  "ACK is over upper bound %u (ACKed data not seen yet)",
					  receiver->td_end + 1);

	/* Is the ending sequence in the receive window (if available)? */
	in_recv_win = !receiver->td_maxwin ||
		      after(end, sender->td_end - receiver->td_maxwin - 1);
	if (!in_recv_win)
		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
					  "SEQ is under lower bound %u (already ACKed data retransmitted)",
					  sender->td_end - receiver->td_maxwin - 1);
	if (!after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1))
		return nf_tcp_log_invalid(skb, ct, hook_state, sender, NFCT_TCP_IGNORE,
					  "ignored ACK under lower bound %u (possibly overly delayed)",
					  receiver->td_end - MAXACKWINDOW(sender) - 1);

	/* Take into account window scaling (RFC 1323). */
	if (!tcph->syn)
		win <<= sender->td_scale;

	/* Update sender data. */
	swin = win + (sack - ack);
	if (sender->td_maxwin < swin)
		sender->td_maxwin = swin;
	if (after(end, sender->td_end)) {
		sender->td_end = end;
		sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
	}
	if (tcph->ack) {
		if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
			sender->td_maxack = ack;
			sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
		} else if (after(ack, sender->td_maxack)) {
			sender->td_maxack = ack;
		}
	}

	/* Update receiver data. */
	if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
		receiver->td_maxwin += end - sender->td_maxend;
	if (after(sack + win, receiver->td_maxend - 1)) {
		receiver->td_maxend = sack + win;
		if (win == 0)
			receiver->td_maxend++;
	}
	if (ack == receiver->td_end)
		receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;

	/* Check retransmissions. */
	if (index == TCP_ACK_SET) {
		if (state->last_dir == dir &&
		    state->last_seq == seq &&
		    state->last_ack == ack &&
		    state->last_end == end &&
		    state->last_win == win_raw) {
			state->retrans++;
		} else {
			state->last_dir = dir;
			state->last_seq = seq;
			state->last_ack = ack;
			state->last_end = end;
			state->last_win = win_raw;
			state->retrans = 0;
		}
	}

	return NFCT_TCP_ACCEPT;
}

static void __cold nf_tcp_handle_invalid(struct nf_conn *ct,
					 enum ip_conntrack_dir dir,
					 int index,
					 const struct sk_buff *skb,
					 const struct nf_hook_state *hook_state)
{
	const unsigned int *timeouts;
	const struct nf_tcp_net *tn;
	unsigned int timeout;
	u32 expires;

	if (!test_bit(IPS_ASSURED_BIT, &ct->status) ||
	    test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
		return;

	/* We don't want to have connections hanging around in ESTABLISHED
	 * state for a long time 'just because' conntrack deemed a FIN/RST
	 * out-of-window.
	 *
	 * Shrink the timeout just like when there is unacked data.
	 * This speeds up eviction of 'dead' connections where the
	 * connection and the conntrack's internal state are out of sync.
	 */
	switch (index) {
	case TCP_RST_SET:
	case TCP_FIN_SET:
		break;
	default:
		return;
	}

	if (ct->proto.tcp.last_dir != dir &&
	    (ct->proto.tcp.last_index == TCP_FIN_SET ||
	     ct->proto.tcp.last_index == TCP_RST_SET)) {
		expires = nf_ct_expires(ct);
		if (expires < 120 * HZ)
			return;

		tn = nf_tcp_pernet(nf_ct_net(ct));
		timeouts = nf_ct_timeout_lookup(ct);
		if (!timeouts)
			timeouts = tn->timeouts;

		timeout = READ_ONCE(timeouts[TCP_CONNTRACK_UNACK]);
		if (expires > timeout) {
			nf_ct_l4proto_log_invalid(skb, ct, hook_state,
					  "packet (index %d, dir %d) response for index %d lower timeout to %u",
					  index, dir, ct->proto.tcp.last_index, timeout);

			WRITE_ONCE(ct->timeout, timeout + nfct_time_stamp);
		}
	} else {
		ct->proto.tcp.last_index = index;
		ct->proto.tcp.last_dir = dir;
	}
}

/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
				 TCPHDR_URG) + 1] =
{
	[TCPHDR_SYN]				= 1,
	[TCPHDR_SYN|TCPHDR_URG]			= 1,
	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
	[TCPHDR_RST]				= 1,
	[TCPHDR_RST|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
	[TCPHDR_ACK]				= 1,
	[TCPHDR_ACK|TCPHDR_URG]			= 1,
};
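
/* Example (illustrative): tcp_error() below masks out ECE, CWR and PSH
 * before the lookup, so a SYN|ACK|PSH packet checks
 * tcp_valid_flags[TCPHDR_SYN | TCPHDR_ACK], which is 1 (valid), while a
 * SYN|FIN packet hits an entry left at 0 and is flagged as an invalid
 * flag combination.
 */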

static void tcp_error_log(const struct sk_buff *skb,
			  const struct nf_hook_state *state,
			  const char *msg)
{
	nf_l4proto_log_invalid(skb, state, IPPROTO_TCP, "%s", msg);
}

/* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
static bool tcp_error(const struct tcphdr *th,
		      struct sk_buff *skb,
		      unsigned int dataoff,
		      const struct nf_hook_state *state)
{
	unsigned int tcplen = skb->len - dataoff;
	u8 tcpflags;

	/* Not whole TCP header or malformed packet */
	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
		tcp_error_log(skb, state, "truncated packet");
		return true;
	}

	/* Checksum invalid? Ignore.
	 * We skip checking packets on the outgoing path
	 * because the checksum is assumed to be correct.
	 */
	/* FIXME: Source route IP option packets --RR */
	if (state->net->ct.sysctl_checksum &&
	    state->hook == NF_INET_PRE_ROUTING &&
	    nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
		tcp_error_log(skb, state, "bad checksum");
		return true;
	}

	/* Check TCP flags. */
	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
	if (!tcp_valid_flags[tcpflags]) {
		tcp_error_log(skb, state, "invalid tcp flag combination");
		return true;
	}

	return false;
}

static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
			     unsigned int dataoff,
			     const struct tcphdr *th)
{
	enum tcp_conntrack new_state;
	struct net *net = nf_ct_net(ct);
	const struct nf_tcp_net *tn = nf_tcp_pernet(net);

	/* Don't need lock here: this conntrack not in circulation yet */
	new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];

	/* Invalid: delete conntrack */
	if (new_state >= TCP_CONNTRACK_MAX) {
		pr_debug("nf_ct_tcp: invalid new deleting.\n");
		return false;
	}

	if (new_state == TCP_CONNTRACK_SYN_SENT) {
		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
		/* SYN packet */
		ct->proto.tcp.seen[0].td_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len,
					     dataoff, th);
		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
		if (ct->proto.tcp.seen[0].td_maxwin == 0)
			ct->proto.tcp.seen[0].td_maxwin = 1;
		ct->proto.tcp.seen[0].td_maxend =
			ct->proto.tcp.seen[0].td_end;

		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
	} else if (tn->tcp_loose == 0) {
		/* Don't try to pick up connections. */
		return false;
	} else {
		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
		/*
		 * We are in the middle of a connection,
		 * its history is lost for us.
		 * Let's try to use the data from the packet.
		 */
		ct->proto.tcp.seen[0].td_end =
			segment_seq_plus_len(ntohl(th->seq), skb->len,
					     dataoff, th);
		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
		if (ct->proto.tcp.seen[0].td_maxwin == 0)
			ct->proto.tcp.seen[0].td_maxwin = 1;
		ct->proto.tcp.seen[0].td_maxend =
			ct->proto.tcp.seen[0].td_end +
			ct->proto.tcp.seen[0].td_maxwin;

		/* We assume SACK and liberal window checking to handle
		 * window scaling */
		ct->proto.tcp.seen[0].flags =
		ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
					      IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	/* tcp_packet will set them */
	ct->proto.tcp.last_index = TCP_NONE_SET;
	return true;
}

static bool tcp_can_early_drop(const struct nf_conn *ct)
{
	switch (ct->proto.tcp.state) {
	case TCP_CONNTRACK_FIN_WAIT:
	case TCP_CONNTRACK_LAST_ACK:
	case TCP_CONNTRACK_TIME_WAIT:
	case TCP_CONNTRACK_CLOSE:
	case TCP_CONNTRACK_CLOSE_WAIT:
		return true;
	default:
		break;
	}

	return false;
}

static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
{
	state->td_end		= 0;
	state->td_maxend	= 0;
	state->td_maxwin	= 0;
	state->td_maxack	= 0;
	state->td_scale		= 0;
	state->flags		&= IP_CT_TCP_FLAG_BE_LIBERAL;
}

/* Returns verdict for packet, or -1 for invalid. */
int nf_conntrack_tcp_packet(struct nf_conn *ct,
			    struct sk_buff *skb,
			    unsigned int dataoff,
			    enum ip_conntrack_info ctinfo,
			    const struct nf_hook_state *state)
{
	struct net *net = nf_ct_net(ct);
	struct nf_tcp_net *tn = nf_tcp_pernet(net);
	struct nf_conntrack_tuple *tuple;
	enum tcp_conntrack new_state, old_state;
	unsigned int index, *timeouts;
	enum nf_ct_tcp_action res;
	enum ip_conntrack_dir dir;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	unsigned long timeout;

	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return -NF_ACCEPT;

	if (tcp_error(th, skb, dataoff, state))
		return -NF_ACCEPT;

	if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
		return -NF_ACCEPT;

	spin_lock_bh(&ct->lock);
	old_state = ct->proto.tcp.state;
	dir = CTINFO2DIR(ctinfo);
	index = get_conntrack_index(th);
	new_state = tcp_conntracks[dir][index][old_state];
	tuple = &ct->tuplehash[dir].tuple;

	switch (new_state) {
	case TCP_CONNTRACK_SYN_SENT:
		if (old_state < TCP_CONNTRACK_TIME_WAIT)
			break;
		/* RFC 1122: "When a connection is closed actively,
		 * it MUST linger in TIME-WAIT state for a time 2xMSL
		 * (Maximum Segment Lifetime). However, it MAY accept
		 * a new SYN from the remote TCP to reopen the connection
		 * directly from TIME-WAIT state, if..."
		 * We ignore the conditions because we are in the
		 * TIME-WAIT state anyway.
		 *
		 * Handle aborted connections: we and the server
		 * think there is an existing connection but the client
		 * aborts it and starts a new one.
		 */
		if (((ct->proto.tcp.seen[dir].flags
		      | ct->proto.tcp.seen[!dir].flags)
		     & IP_CT_TCP_FLAG_CLOSE_INIT)
		    || (ct->proto.tcp.last_dir == dir
		        && ct->proto.tcp.last_index == TCP_RST_SET)) {
			/* Attempt to reopen a closed/aborted connection.
			 * Delete this connection and look up again. */
			spin_unlock_bh(&ct->lock);

			/* Only repeat if we can actually remove the timer.
			 * Destruction may already be in progress in process
			 * context and we must give it a chance to terminate.
			 */
			if (nf_ct_kill(ct))
				return -NF_REPEAT;
			return NF_DROP;
		}
		fallthrough;
	case TCP_CONNTRACK_IGNORE:
		/* Ignored packets:
		 *
		 * Our connection entry may be out of sync, so ignore
		 * packets which may signal the real connection between
		 * the client and the server.
		 *
		 * a) SYN in ORIGINAL
		 * b) SYN/ACK in REPLY
		 * c) ACK in reply direction after initial SYN in original.
		 *
		 * If the ignored packet is invalid, the receiver will send
		 * a RST we'll catch below.
		 */
		if (index == TCP_SYNACK_SET
		    && ct->proto.tcp.last_index == TCP_SYN_SET
		    && ct->proto.tcp.last_dir != dir
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* b) This SYN/ACK acknowledges a SYN that we earlier
			 * ignored as invalid. This means that the client and
			 * the server are both in sync, while the firewall is
			 * not. We get in sync from the previously annotated
			 * values.
			 */
			old_state = TCP_CONNTRACK_SYN_SENT;
			new_state = TCP_CONNTRACK_SYN_RECV;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
				ct->proto.tcp.last_end;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
				ct->proto.tcp.last_win == 0 ?
					1 : ct->proto.tcp.last_win;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
				ct->proto.tcp.last_wscale;
			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
				ct->proto.tcp.last_flags;
			nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
			break;
		}
		ct->proto.tcp.last_index = index;
		ct->proto.tcp.last_dir = dir;
		ct->proto.tcp.last_seq = ntohl(th->seq);
		ct->proto.tcp.last_end =
		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
		ct->proto.tcp.last_win = ntohs(th->window);

		/* a) This is a SYN in ORIGINAL. The client and the server
		 * may be in sync but we are not. In that case, we annotate
		 * the TCP options and let the packet go through. If it is a
		 * valid SYN packet, the server will reply with a SYN/ACK, and
		 * then we'll get in sync. Otherwise, the server potentially
		 * responds with a challenge ACK if implementing RFC5961.
		 */
		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
			struct ip_ct_tcp_state seen = {};

			ct->proto.tcp.last_flags =
			ct->proto.tcp.last_wscale = 0;
			tcp_options(skb, dataoff, th, &seen);
			if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_WINDOW_SCALE;
				ct->proto.tcp.last_wscale = seen.td_scale;
			}
			if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
				ct->proto.tcp.last_flags |=
					IP_CT_TCP_FLAG_SACK_PERM;
			}
			/* Mark the potential for an RFC5961 challenge ACK;
			 * this poses a special problem for the LAST_ACK state,
			 * as such an ACK would be interpreted as ACKing the last FIN.
			 */
			if (old_state == TCP_CONNTRACK_LAST_ACK)
				ct->proto.tcp.last_flags |=
					IP_CT_EXP_CHALLENGE_ACK;
		}

		/* possible challenge ack reply to syn */
		if (old_state == TCP_CONNTRACK_SYN_SENT &&
		    index == TCP_ACK_SET &&
		    dir == IP_CT_DIR_REPLY)
			ct->proto.tcp.last_ack = ntohl(th->ack_seq);

		spin_unlock_bh(&ct->lock);
		nf_ct_l4proto_log_invalid(skb, ct, state,
					  "packet (index %d) in dir %d ignored, state %s",
					  index, dir,
					  tcp_conntrack_names[old_state]);
		return NF_ACCEPT;
	case TCP_CONNTRACK_MAX:
		/* Special case for SYN proxy: when the SYN to the server or
		 * the SYN/ACK from the server is lost, the client may transmit
		 * a keep-alive packet while in SYN_SENT state. This needs to
		 * be associated with the original conntrack entry in order to
		 * generate a new SYN with the correct sequence number.
		 */
		if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
		    index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
		    ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
		    ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
			pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
			spin_unlock_bh(&ct->lock);
			return NF_ACCEPT;
		}

		/* Invalid packet */
		spin_unlock_bh(&ct->lock);
		nf_ct_l4proto_log_invalid(skb, ct, state,
					  "packet (index %d) in dir %d invalid, state %s",
					  index, dir,
					  tcp_conntrack_names[old_state]);
		return -NF_ACCEPT;
	case TCP_CONNTRACK_TIME_WAIT:
		/* RFC5961 compliance causes stacks to send a "challenge ACK",
		 * e.g. in response to spurious SYNs.  Conntrack MUST
		 * not believe this ACK is acking the last FIN.
		 */
		if (old_state == TCP_CONNTRACK_LAST_ACK &&
		    index == TCP_ACK_SET &&
		    ct->proto.tcp.last_dir != dir &&
		    ct->proto.tcp.last_index == TCP_SYN_SET &&
		    (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
			/* Detected RFC5961 challenge ACK */
			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
			spin_unlock_bh(&ct->lock);
			nf_ct_l4proto_log_invalid(skb, ct, state, "challenge-ack ignored");
			return NF_ACCEPT; /* Don't change state */
		}
		break;
	case TCP_CONNTRACK_SYN_SENT2:
		/* tcp_conntracks table is not smart enough to handle
		 * simultaneous open.
		 */
		ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
		break;
	case TCP_CONNTRACK_SYN_RECV:
		if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
		    ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
			new_state = TCP_CONNTRACK_ESTABLISHED;
		break;
	case TCP_CONNTRACK_CLOSE:
		if (index != TCP_RST_SET)
			break;

		/* If we are closing, tuple might have been re-used already.
		 * last_index, last_ack, and all other ct fields used for
		 * sequence/window validation are outdated in that case.
		 *
		 * As the conntrack can already be expired by GC under pressure,
		 * just skip validation checks.
		 */
		if (tcp_can_early_drop(ct))
			goto in_window;

		/* td_maxack might be outdated if we let a SYN through earlier */
		if ((ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) &&
		    ct->proto.tcp.last_index != TCP_SYN_SET) {
			u32 seq = ntohl(th->seq);

			/* If we are not in established state and SEQ=0 this is most
			 * likely an answer to a SYN we let go through above (last_index
			 * can be updated due to out-of-order ACKs).
			 */
			if (seq == 0 && !nf_conntrack_tcp_established(ct))
				break;

			if (before(seq, ct->proto.tcp.seen[!dir].td_maxack) &&
			    !tn->tcp_ignore_invalid_rst) {
				/* Invalid RST */
				spin_unlock_bh(&ct->lock);
				nf_ct_l4proto_log_invalid(skb, ct, state, "invalid rst");
				return -NF_ACCEPT;
			}

			if (!nf_conntrack_tcp_established(ct) ||
			    seq == ct->proto.tcp.seen[!dir].td_maxack)
				break;

			/* Check if the rst is part of a train, such as
			 *   foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
			 *   foo:80 > bar:4379: R, 235946602:235946602(0)  ack 42
			 */
			if (ct->proto.tcp.last_index == TCP_ACK_SET &&
			    ct->proto.tcp.last_dir == dir &&
			    seq == ct->proto.tcp.last_end)
				break;

			/* ... RST sequence number doesn't match exactly, keep
			 * established state to allow a possible challenge ACK.
			 */
			new_state = old_state;
		}
		if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
			 && ct->proto.tcp.last_index == TCP_SYN_SET)
			|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
			    && ct->proto.tcp.last_index == TCP_ACK_SET))
		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
			/* RST sent to invalid SYN or ACK we had let through
			 * at a) and c) above:
			 *
			 * a) SYN was in window then
			 * c) we hold a half-open connection.
			 *
			 * Delete our connection entry.
			 * We skip window checking, because packet might ACK
			 * segments we ignored. */
			goto in_window;
		}

		/* Reset in response to a challenge-ack we let through earlier */
		if (old_state == TCP_CONNTRACK_SYN_SENT &&
		    ct->proto.tcp.last_index == TCP_ACK_SET &&
		    ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
		    ntohl(th->seq) == ct->proto.tcp.last_ack)
			goto in_window;

		break;
	default:
		/* Keep compilers happy. */
		break;
	}

	res = tcp_in_window(ct, dir, index,
			    skb, dataoff, th, state);
	switch (res) {
	case NFCT_TCP_IGNORE:
		spin_unlock_bh(&ct->lock);
		return NF_ACCEPT;
	case NFCT_TCP_INVALID:
		nf_tcp_handle_invalid(ct, dir, index, skb, state);
		spin_unlock_bh(&ct->lock);
		return -NF_ACCEPT;
	case NFCT_TCP_ACCEPT:
		break;
	}
 in_window:
	/* From now on we have got in-window packets */
	ct->proto.tcp.last_index = index;
	ct->proto.tcp.last_dir = dir;

	pr_debug("tcp_conntracks: ");
	nf_ct_dump_tuple(tuple);
	pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
		 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
		 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
		 old_state, new_state);

	ct->proto.tcp.state = new_state;
	if (old_state != new_state
	    && new_state == TCP_CONNTRACK_FIN_WAIT)
		ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;

	timeouts = nf_ct_timeout_lookup(ct);
	if (!timeouts)
		timeouts = tn->timeouts;

	if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
	    timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
		timeout = timeouts[TCP_CONNTRACK_RETRANS];
	else if (unlikely(index == TCP_RST_SET))
		timeout = timeouts[TCP_CONNTRACK_CLOSE];
	else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
		 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
		 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
		timeout = timeouts[TCP_CONNTRACK_UNACK];
	else if (ct->proto.tcp.last_win == 0 &&
		 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
		timeout = timeouts[TCP_CONNTRACK_RETRANS];
	else
		timeout = timeouts[new_state];
	spin_unlock_bh(&ct->lock);

	if (new_state != old_state)
		nf_conntrack_event_cache(IPCT_PROTOINFO, ct);

	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
		/* If the only reply is a RST, we can consider ourselves not to
		   have an established connection: this is a fairly common
		   problem case, so we can delete the conntrack
		   immediately.  --RR */
		if (th->rst) {
			nf_ct_kill_acct(ct, ctinfo, skb);
			return NF_ACCEPT;
		}

		if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
			/* do not renew timeout on SYN retransmit.
			 *
			 * Else port reuse by client or NAT middlebox can keep
			 * entry alive indefinitely (including nat info).
			 */
			return NF_ACCEPT;
		}

		/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
		 * pickup with loose=1. Avoid large ESTABLISHED timeout.
		 */
		if (new_state == TCP_CONNTRACK_ESTABLISHED &&
		    timeout > timeouts[TCP_CONNTRACK_UNACK])
			timeout = timeouts[TCP_CONNTRACK_UNACK];
	} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
		   && (old_state == TCP_CONNTRACK_SYN_RECV
		       || old_state == TCP_CONNTRACK_ESTABLISHED)
		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
		/* Set ASSURED if we see a valid ack in ESTABLISHED
		   after SYN_RECV or a valid answer for a picked up
		   connection. */
		set_bit(IPS_ASSURED_BIT, &ct->status);
		nf_conntrack_event_cache(IPCT_ASSURED, ct);
	}
	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);

	return NF_ACCEPT;
}

#if IS_ENABLED(CONFIG_NF_CT_NETLINK)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
			 struct nf_conn *ct, bool destroy)
{
	struct nlattr *nest_parms;
	struct nf_ct_tcp_flags tmp = {};

	spin_lock_bh(&ct->lock);
	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
	if (!nest_parms)
		goto nla_put_failure;

	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state))
		goto nla_put_failure;

	if (destroy)
		goto skip_state;

	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
		       ct->proto.tcp.seen[0].td_scale) ||
	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
		       ct->proto.tcp.seen[1].td_scale))
		goto nla_put_failure;

	tmp.flags = ct->proto.tcp.seen[0].flags;
	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
		    sizeof(struct nf_ct_tcp_flags), &tmp))
		goto nla_put_failure;

	tmp.flags = ct->proto.tcp.seen[1].flags;
	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
		    sizeof(struct nf_ct_tcp_flags), &tmp))
		goto nla_put_failure;
skip_state:
	spin_unlock_bh(&ct->lock);
	nla_nest_end(skb, nest_parms);

	return 0;

nla_put_failure:
	spin_unlock_bh(&ct->lock);
	return -1;
}

static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
	[CTA_PROTOINFO_TCP_STATE]	    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
	[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len = sizeof(struct nf_ct_tcp_flags) },
};

#define TCP_NLATTR_SIZE	( \
	NLA_ALIGN(NLA_HDRLEN + 1) + \
	NLA_ALIGN(NLA_HDRLEN + 1) + \
	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))

static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
	struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
	int err;

	/* an update might not contain anything about the private
	 * protocol info; in that case skip the parsing */
	if (!pattr)
		return 0;

	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
					  tcp_nla_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CTA_PROTOINFO_TCP_STATE] &&
	    nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
		return -EINVAL;

	spin_lock_bh(&ct->lock);
	if (tb[CTA_PROTOINFO_TCP_STATE])
		ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);

	if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
		struct nf_ct_tcp_flags *attr =
			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
		ct->proto.tcp.seen[0].flags &= ~attr->mask;
		ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
	}

	if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
		struct nf_ct_tcp_flags *attr =
			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
		ct->proto.tcp.seen[1].flags &= ~attr->mask;
		ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
	}

	if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
	    tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
	    ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
	    ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
		ct->proto.tcp.seen[0].td_scale =
			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
		ct->proto.tcp.seen[1].td_scale =
			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
	}
	spin_unlock_bh(&ct->lock);

	return 0;
}

static unsigned int tcp_nlattr_tuple_size(void)
{
	static unsigned int size __read_mostly;

	if (!size)
		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);

	return size;
}
#endif

#ifdef CONFIG_NF_CONNTRACK_TIMEOUT

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
				     struct net *net, void *data)
{
	struct nf_tcp_net *tn = nf_tcp_pernet(net);
	unsigned int *timeouts = data;
	int i;

	if (!timeouts)
		timeouts = tn->timeouts;
	/* set default TCP timeouts. */
	for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
		timeouts[i] = tn->timeouts[i];

	if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
		timeouts[TCP_CONNTRACK_SYN_SENT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
	}

	if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
		timeouts[TCP_CONNTRACK_SYN_RECV] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
		timeouts[TCP_CONNTRACK_ESTABLISHED] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
		timeouts[TCP_CONNTRACK_FIN_WAIT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
		timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
		timeouts[TCP_CONNTRACK_LAST_ACK] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
		timeouts[TCP_CONNTRACK_TIME_WAIT] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
		timeouts[TCP_CONNTRACK_CLOSE] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
		timeouts[TCP_CONNTRACK_SYN_SENT2] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
		timeouts[TCP_CONNTRACK_RETRANS] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
	}
	if (tb[CTA_TIMEOUT_TCP_UNACK]) {
		timeouts[TCP_CONNTRACK_UNACK] =
			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
	}

	timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
	return 0;
}
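
/* Example (illustrative): a userspace value of 300 for
 * CTA_TIMEOUT_TCP_ESTABLISHED is in seconds; it is stored above as
 * 300 * HZ jiffies, and tcp_timeout_obj_to_nlattr() below divides by HZ
 * to convert back for netlink dumps.
 */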

static int
tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
{
	const unsigned int *timeouts = data;

	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
			 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
			 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
			 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
			 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
			 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
			 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
			 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
			 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
			 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
			 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -ENOSPC;
}

static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
	[CTA_TIMEOUT_TCP_SYN_SENT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_SYN_RECV]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_ESTABLISHED]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_FIN_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_CLOSE_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_LAST_ACK]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_TIME_WAIT]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_CLOSE]		= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_SYN_SENT2]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_RETRANS]	= { .type = NLA_U32 },
	[CTA_TIMEOUT_TCP_UNACK]		= { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */

void nf_conntrack_tcp_init_net(struct net *net)
{
	struct nf_tcp_net *tn = nf_tcp_pernet(net);
	int i;

	for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
		tn->timeouts[i] = tcp_timeouts[i];

	/* timeouts[0] is unused, make it the same as SYN_SENT so
	 * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
	 */
	tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];

	/* If it is set to zero, we disable picking up already established
	 * connections.
	 */
	tn->tcp_loose = 1;

	/* "Be conservative in what you do,
	 *  be liberal in what you accept from others."
	 * If it's non-zero, we mark only out of window RST segments as INVALID.
	 */
	tn->tcp_be_liberal = 0;

	/* If it's non-zero, we turn off the RST sequence number check */
	tn->tcp_ignore_invalid_rst = 0;

	/* Max number of retransmitted packets without receiving an (acceptable)
	 * ACK from the destination. If this number is reached, a shorter timer
	 * will be started.
	 */
	tn->tcp_max_retrans = 3;

#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
	tn->offload_timeout = 30 * HZ;
#endif
}
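
/* These per-netns defaults correspond to the nf_conntrack_tcp_* sysctls
 * (e.g. net.netfilter.nf_conntrack_tcp_loose,
 * net.netfilter.nf_conntrack_tcp_be_liberal), assuming the usual sysctl
 * table registration elsewhere in the tree, so they can be tuned at
 * runtime per network namespace.
 */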

const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
{
	.l4proto		= IPPROTO_TCP,
#ifdef CONFIG_NF_CONNTRACK_PROCFS
	.print_conntrack	= tcp_print_conntrack,
#endif
	.can_early_drop		= tcp_can_early_drop,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.to_nlattr		= tcp_to_nlattr,
	.from_nlattr		= nlattr_to_tcp,
	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
	.nlattr_size		= TCP_NLATTR_SIZE,
	.nla_policy		= nf_ct_port_nla_policy,
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
	.ctnl_timeout		= {
		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
		.obj_size	= sizeof(unsigned int) *
					TCP_CONNTRACK_TIMEOUT_MAX,
		.nla_policy	= tcp_timeout_nla_policy,
	},
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
};