1 // SPDX-License-Identifier: GPL-2.0-only
2 /* (C) 1999-2001 Paul `Rusty' Russell
3  * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
4  * (C) 2002-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
5  * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
6  */
7 
8 #include <linux/types.h>
9 #include <linux/timer.h>
10 #include <linux/module.h>
11 #include <linux/in.h>
12 #include <linux/tcp.h>
13 #include <linux/spinlock.h>
14 #include <linux/skbuff.h>
15 #include <linux/ipv6.h>
16 #include <net/ip6_checksum.h>
17 #include <asm/unaligned.h>
18 
19 #include <net/tcp.h>
20 
21 #include <linux/netfilter.h>
22 #include <linux/netfilter_ipv4.h>
23 #include <linux/netfilter_ipv6.h>
24 #include <net/netfilter/nf_conntrack.h>
25 #include <net/netfilter/nf_conntrack_l4proto.h>
26 #include <net/netfilter/nf_conntrack_ecache.h>
27 #include <net/netfilter/nf_conntrack_seqadj.h>
28 #include <net/netfilter/nf_conntrack_synproxy.h>
29 #include <net/netfilter/nf_conntrack_timeout.h>
30 #include <net/netfilter/nf_log.h>
31 #include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
32 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
33 
34 /* "Be conservative in what you do,
35     be liberal in what you accept from others."
36     If it's non-zero, we mark only out-of-window RST segments as INVALID. */
37 static int nf_ct_tcp_be_liberal __read_mostly = 0;
38 
39 /* If it is set to zero, we disable picking up already established
40    connections. */
41 static int nf_ct_tcp_loose __read_mostly = 1;
42 
43 /* Max number of the retransmitted packets without receiving an (acceptable)
44    ACK from the destination. If this number is reached, a shorter timer
45    will be started. */
46 static int nf_ct_tcp_max_retrans __read_mostly = 3;
47 
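/* For illustration (not part of the original source): the three defaults above
 * are copied into the per-netns nf_tcp_net state in nf_conntrack_tcp_init_net()
 * below and are normally tuned at runtime via sysctls; assuming the standard
 * net.netfilter knobs exposed by nf_conntrack, e.g.:
 *
 *   sysctl -w net.netfilter.nf_conntrack_tcp_be_liberal=1
 *   sysctl -w net.netfilter.nf_conntrack_tcp_loose=0
 *   sysctl -w net.netfilter.nf_conntrack_tcp_max_retrans=3
 */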
48   /* FIXME: Examine ipfilter's timeouts and conntrack transitions more
49      closely.  They're more complex. --RR */
50 
51 static const char *const tcp_conntrack_names[] = {
52 	"NONE",
53 	"SYN_SENT",
54 	"SYN_RECV",
55 	"ESTABLISHED",
56 	"FIN_WAIT",
57 	"CLOSE_WAIT",
58 	"LAST_ACK",
59 	"TIME_WAIT",
60 	"CLOSE",
61 	"SYN_SENT2",
62 };
63 
64 #define SECS * HZ
65 #define MINS * 60 SECS
66 #define HOURS * 60 MINS
67 #define DAYS * 24 HOURS
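/* For illustration: the macros above simply expand to jiffy counts, e.g.
 *   2 MINS  ==  2 * 60 * HZ
 *   5 DAYS  ==  5 * 24 * 60 * 60 * HZ
 * so every entry in tcp_timeouts[] below is expressed in jiffies.
 */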
68 
69 static const unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] = {
70 	[TCP_CONNTRACK_SYN_SENT]	= 2 MINS,
71 	[TCP_CONNTRACK_SYN_RECV]	= 60 SECS,
72 	[TCP_CONNTRACK_ESTABLISHED]	= 5 DAYS,
73 	[TCP_CONNTRACK_FIN_WAIT]	= 2 MINS,
74 	[TCP_CONNTRACK_CLOSE_WAIT]	= 60 SECS,
75 	[TCP_CONNTRACK_LAST_ACK]	= 30 SECS,
76 	[TCP_CONNTRACK_TIME_WAIT]	= 2 MINS,
77 	[TCP_CONNTRACK_CLOSE]		= 10 SECS,
78 	[TCP_CONNTRACK_SYN_SENT2]	= 2 MINS,
79 /* RFC1122 says the R2 limit should be at least 100 seconds.
80    Linux uses 15 packets as limit, which corresponds
81    to ~13-30min depending on RTO. */
82 	[TCP_CONNTRACK_RETRANS]		= 5 MINS,
83 	[TCP_CONNTRACK_UNACK]		= 5 MINS,
84 };
85 
86 #define sNO TCP_CONNTRACK_NONE
87 #define sSS TCP_CONNTRACK_SYN_SENT
88 #define sSR TCP_CONNTRACK_SYN_RECV
89 #define sES TCP_CONNTRACK_ESTABLISHED
90 #define sFW TCP_CONNTRACK_FIN_WAIT
91 #define sCW TCP_CONNTRACK_CLOSE_WAIT
92 #define sLA TCP_CONNTRACK_LAST_ACK
93 #define sTW TCP_CONNTRACK_TIME_WAIT
94 #define sCL TCP_CONNTRACK_CLOSE
95 #define sS2 TCP_CONNTRACK_SYN_SENT2
96 #define sIV TCP_CONNTRACK_MAX
97 #define sIG TCP_CONNTRACK_IGNORE
98 
99 /* What TCP flags are set from RST/SYN/FIN/ACK. */
100 enum tcp_bit_set {
101 	TCP_SYN_SET,
102 	TCP_SYNACK_SET,
103 	TCP_FIN_SET,
104 	TCP_ACK_SET,
105 	TCP_RST_SET,
106 	TCP_NONE_SET,
107 };
108 
109 /*
110  * The TCP state transition table needs a few words...
111  *
112  * We are the man in the middle. All the packets go through us
113  * but might get lost in transit to the destination.
114  * It is assumed that the destinations can't receive segments
115  * we haven't seen.
116  *
117  * The checked segment is in window, but our windows are *not*
118  * equivalent with the ones of the sender/receiver. We always
119  * try to guess the state of the current sender.
120  *
121  * The meaning of the states are:
122  *
123  * NONE:	initial state
124  * SYN_SENT:	SYN-only packet seen
125  * SYN_SENT2:	SYN-only packet seen from reply dir, simultaneous open
126  * SYN_RECV:	SYN-ACK packet seen
127  * ESTABLISHED:	ACK packet seen
128  * FIN_WAIT:	FIN packet seen
129  * CLOSE_WAIT:	ACK seen (after FIN)
130  * LAST_ACK:	FIN seen (after FIN)
131  * TIME_WAIT:	last ACK seen
132  * CLOSE:	closed connection (RST)
133  *
134  * Packets marked as IGNORED (sIG):
135  *	if they may be either invalid or valid
136  *	and the receiver may send back a connection
137  *	closing RST or a SYN/ACK.
138  *
139  * Packets marked as INVALID (sIV):
140  *	if we regard them as truly invalid packets
141  */
142 static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
143 	{
144 /* ORIGINAL */
145 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
146 /*syn*/	   { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sS2 },
147 /*
148  *	sNO -> sSS	Initialize a new connection
149  *	sSS -> sSS	Retransmitted SYN
150  *	sS2 -> sS2	Late retransmitted SYN
151  *	sSR -> sIG
152  *	sES -> sIG	Error: SYNs in window outside the SYN_SENT state
153  *			are errors. Receiver will reply with RST
154  *			and close the connection.
155  *			Or we are not in sync and hold a dead connection.
156  *	sFW -> sIG
157  *	sCW -> sIG
158  *	sLA -> sIG
159  *	sTW -> sSS	Reopened connection (RFC 1122).
160  *	sCL -> sSS
161  */
162 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
163 /*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR },
164 /*
165  *	sNO -> sIV	Too late and no reason to do anything
166  *	sSS -> sIV	Client can't send SYN and then SYN/ACK
167  *	sS2 -> sSR	SYN/ACK sent to SYN2 in simultaneous open
168  *	sSR -> sSR	Late retransmitted SYN/ACK in simultaneous open
169  *	sES -> sIV	Invalid SYN/ACK packets sent by the client
170  *	sFW -> sIV
171  *	sCW -> sIV
172  *	sLA -> sIV
173  *	sTW -> sIV
174  *	sCL -> sIV
175  */
176 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
177 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
178 /*
179  *	sNO -> sIV	Too late and no reason to do anything...
180  *	sSS -> sIV	Client might not send FIN in this state:
181  *			we enforce waiting for a SYN/ACK reply first.
182  *	sS2 -> sIV
183  *	sSR -> sFW	Close started.
184  *	sES -> sFW
185  *	sFW -> sLA	FIN seen in both directions, waiting for
186  *			the last ACK.
187  *			Might be a retransmitted FIN as well...
188  *	sCW -> sLA
189  *	sLA -> sLA	Retransmitted FIN. Remain in the same state.
190  *	sTW -> sTW
191  *	sCL -> sCL
192  */
193 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
194 /*ack*/	   { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
195 /*
196  *	sNO -> sES	Assumed.
197  *	sSS -> sIV	ACK is invalid: we haven't seen a SYN/ACK yet.
198  *	sS2 -> sIV
199  *	sSR -> sES	Established state is reached.
200  *	sES -> sES	:-)
201  *	sFW -> sCW	Normal close request answered by ACK.
202  *	sCW -> sCW
203  *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
204  *	sTW -> sTW	Retransmitted last ACK. Remain in the same state.
205  *	sCL -> sCL
206  */
207 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
208 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
209 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
210 	},
211 	{
212 /* REPLY */
213 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
214 /*syn*/	   { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
215 /*
216  *	sNO -> sIV	Never reached.
217  *	sSS -> sS2	Simultaneous open
218  *	sS2 -> sS2	Retransmitted simultaneous SYN
219  *	sSR -> sIV	Invalid SYN packets sent by the server
220  *	sES -> sIV
221  *	sFW -> sIV
222  *	sCW -> sIV
223  *	sLA -> sIV
224  *	sTW -> sSS	Reopened connection, but server may have switched role
225  *	sCL -> sIV
226  */
227 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
228 /*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
229 /*
230  *	sSS -> sSR	Standard open.
231  *	sS2 -> sSR	Simultaneous open
232  *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
233  *	sES -> sIG	Late retransmitted SYN/ACK?
234  *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
235  *	sCW -> sIG
236  *	sLA -> sIG
237  *	sTW -> sIG
238  *	sCL -> sIG
239  */
240 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
241 /*fin*/    { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
242 /*
243  *	sSS -> sIV	Server might not send FIN in this state.
244  *	sS2 -> sIV
245  *	sSR -> sFW	Close started.
246  *	sES -> sFW
247  *	sFW -> sLA	FIN seen in both directions.
248  *	sCW -> sLA
249  *	sLA -> sLA	Retransmitted FIN.
250  *	sTW -> sTW
251  *	sCL -> sCL
252  */
253 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
254 /*ack*/	   { sIV, sIG, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIG },
255 /*
256  *	sSS -> sIG	Might be a half-open connection.
257  *	sS2 -> sIG
258  *	sSR -> sSR	Might answer late resent SYN.
259  *	sES -> sES	:-)
260  *	sFW -> sCW	Normal close request answered by ACK.
261  *	sCW -> sCW
262  *	sLA -> sTW	Last ACK detected (RFC5961 challenged)
263  *	sTW -> sTW	Retransmitted last ACK.
264  *	sCL -> sCL
265  */
266 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
267 /*rst*/    { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL },
268 /*none*/   { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
269 	}
270 };
271 
272 #ifdef CONFIG_NF_CONNTRACK_PROCFS
273 /* Print out the private part of the conntrack. */
274 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
275 {
276 	if (test_bit(IPS_OFFLOAD_BIT, &ct->status))
277 		return;
278 
279 	seq_printf(s, "%s ", tcp_conntrack_names[ct->proto.tcp.state]);
280 }
281 #endif
282 
283 static unsigned int get_conntrack_index(const struct tcphdr *tcph)
284 {
285 	if (tcph->rst) return TCP_RST_SET;
286 	else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
287 	else if (tcph->fin) return TCP_FIN_SET;
288 	else if (tcph->ack) return TCP_ACK_SET;
289 	else return TCP_NONE_SET;
290 }
291 
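/* Worked example (illustrative): the state table above is consulted as
 *   new_state = tcp_conntracks[dir][get_conntrack_index(th)][old_state];
 * (see nf_conntrack_tcp_packet() below).  E.g. a SYN/ACK seen in the REPLY
 * direction (index TCP_SYNACK_SET) while the connection is in sSS yields
 * sSR, i.e. the normal three-way handshake transition.
 */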
292 /* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
293    in IP Filter' by Guido van Rooij.
294 
295    http://www.sane.nl/events/sane2000/papers.html
296    http://www.darkart.com/mirrors/www.obfuscation.org/ipf/
297 
298    The boundaries and the conditions are changed according to RFC793:
299    the packet must intersect the window (i.e. segments may be
300    after the right or before the left edge) and thus receivers may ACK
301    segments after the right edge of the window.
302 
303 	td_maxend = max(sack + max(win,1)) seen in reply packets
304 	td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
305 	td_maxwin += seq + len - sender.td_maxend
306 			if seq + len > sender.td_maxend
307 	td_end    = max(seq + len) seen in sent packets
308 
309    I.   Upper bound for valid data:	seq <= sender.td_maxend
310    II.  Lower bound for valid data:	seq + len >= sender.td_end - receiver.td_maxwin
311    III.	Upper bound for valid (s)ack:   sack <= receiver.td_end
312    IV.	Lower bound for valid (s)ack:	sack >= receiver.td_end - MAXACKWINDOW
313 
314    where sack is the highest right edge of sack block found in the packet
315    or ack in the case of packet without SACK option.
316 
317    The upper bound limit for a valid (s)ack is not ignored -
318    we don't have to deal with fragments.
319 */
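/* Worked example (illustrative, arbitrary numbers): suppose
 *   sender:   td_end = 150000, td_maxend = 200000, td_maxwin = 50000
 *   receiver: td_end = 300000, td_maxwin = 50000
 * and a segment arrives with seq = 140000, 1000 bytes of data
 * (so seq + len = 141000) and ack/sack = 290000.  Then
 *   I.   140000 <= 200000                        (ok)
 *   II.  141000 >= 150000 - 50000 = 100000       (ok)
 *   III. 290000 <= 300000                        (ok)
 *   IV.  290000 >= 300000 - 66000 = 234000       (ok; MAXACKWINDOW is 66000
 *                                                 here since td_maxwin < 66000)
 * so tcp_in_window() below would accept the segment.
 */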
320 
321 static inline __u32 segment_seq_plus_len(__u32 seq,
322 					 size_t len,
323 					 unsigned int dataoff,
324 					 const struct tcphdr *tcph)
325 {
326 	/* XXX Should I use payload length field in IP/IPv6 header ?
327 	 * - YK */
328 	return (seq + len - dataoff - tcph->doff*4
329 		+ (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
330 }
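/* For illustration: the value returned is the sequence number just past the
 * end of the segment, counting SYN and FIN as one octet each.  E.g. a bare
 * SYN with seq = 1000 and no payload yields 1001; a 100-byte data segment
 * with seq = 5000 yields 5100.
 */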
331 
332 /* Fixme: what about big packets? */
333 #define MAXACKWINCONST			66000
334 #define MAXACKWINDOW(sender)						\
335 	((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin	\
336 					      : MAXACKWINCONST)
337 
338 /*
339  * Simplified tcp_parse_options routine from tcp_input.c
340  */
341 static void tcp_options(const struct sk_buff *skb,
342 			unsigned int dataoff,
343 			const struct tcphdr *tcph,
344 			struct ip_ct_tcp_state *state)
345 {
346 	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
347 	const unsigned char *ptr;
348 	int length = (tcph->doff*4) - sizeof(struct tcphdr);
349 
350 	if (!length)
351 		return;
352 
353 	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
354 				 length, buff);
355 	BUG_ON(ptr == NULL);
356 
357 	state->td_scale = 0;
358 	state->flags &= IP_CT_TCP_FLAG_BE_LIBERAL;
359 
360 	while (length > 0) {
361 		int opcode=*ptr++;
362 		int opsize;
363 
364 		switch (opcode) {
365 		case TCPOPT_EOL:
366 			return;
367 		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
368 			length--;
369 			continue;
370 		default:
371 			if (length < 2)
372 				return;
373 			opsize=*ptr++;
374 			if (opsize < 2) /* "silly options" */
375 				return;
376 			if (opsize > length)
377 				return;	/* don't parse partial options */
378 
379 			if (opcode == TCPOPT_SACK_PERM
380 			    && opsize == TCPOLEN_SACK_PERM)
381 				state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
382 			else if (opcode == TCPOPT_WINDOW
383 				 && opsize == TCPOLEN_WINDOW) {
384 				state->td_scale = *(u_int8_t *)ptr;
385 
386 				if (state->td_scale > TCP_MAX_WSCALE)
387 					state->td_scale = TCP_MAX_WSCALE;
388 
389 				state->flags |=
390 					IP_CT_TCP_FLAG_WINDOW_SCALE;
391 			}
392 			ptr += opsize - 2;
393 			length -= opsize;
394 		}
395 	}
396 }
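/* For illustration: a window scale option encoded as the bytes
 * { TCPOPT_WINDOW (3), TCPOLEN_WINDOW (3), 7 } makes the loop above set
 * state->td_scale = 7 and IP_CT_TCP_FLAG_WINDOW_SCALE, while a lone
 * { TCPOPT_SACK_PERM (4), TCPOLEN_SACK_PERM (2) } only sets
 * IP_CT_TCP_FLAG_SACK_PERM.
 */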
397 
398 static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
399                      const struct tcphdr *tcph, __u32 *sack)
400 {
401 	unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
402 	const unsigned char *ptr;
403 	int length = (tcph->doff*4) - sizeof(struct tcphdr);
404 	__u32 tmp;
405 
406 	if (!length)
407 		return;
408 
409 	ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
410 				 length, buff);
411 	BUG_ON(ptr == NULL);
412 
413 	/* Fast path for timestamp-only option */
414 	if (length == TCPOLEN_TSTAMP_ALIGNED
415 	    && *(__be32 *)ptr == htonl((TCPOPT_NOP << 24)
416 				       | (TCPOPT_NOP << 16)
417 				       | (TCPOPT_TIMESTAMP << 8)
418 				       | TCPOLEN_TIMESTAMP))
419 		return;
420 
421 	while (length > 0) {
422 		int opcode = *ptr++;
423 		int opsize, i;
424 
425 		switch (opcode) {
426 		case TCPOPT_EOL:
427 			return;
428 		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
429 			length--;
430 			continue;
431 		default:
432 			if (length < 2)
433 				return;
434 			opsize = *ptr++;
435 			if (opsize < 2) /* "silly options" */
436 				return;
437 			if (opsize > length)
438 				return;	/* don't parse partial options */
439 
440 			if (opcode == TCPOPT_SACK
441 			    && opsize >= (TCPOLEN_SACK_BASE
442 					  + TCPOLEN_SACK_PERBLOCK)
443 			    && !((opsize - TCPOLEN_SACK_BASE)
444 				 % TCPOLEN_SACK_PERBLOCK)) {
445 				for (i = 0;
446 				     i < (opsize - TCPOLEN_SACK_BASE);
447 				     i += TCPOLEN_SACK_PERBLOCK) {
448 					tmp = get_unaligned_be32((__be32 *)(ptr+i)+1);
449 
450 					if (after(tmp, *sack))
451 						*sack = tmp;
452 				}
453 				return;
454 			}
455 			ptr += opsize - 2;
456 			length -= opsize;
457 		}
458 	}
459 }
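/* For illustration: for a SACK option of kind 5, length 10 carrying a single
 * block {left = 1000, right = 2000}, the loop above reads the right edge
 * (the second 32-bit value of the block) and advances *sack to 2000 if that
 * is beyond the current value; with several blocks, the highest right edge
 * wins.
 */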
460 
461 static bool tcp_in_window(const struct nf_conn *ct,
462 			  struct ip_ct_tcp *state,
463 			  enum ip_conntrack_dir dir,
464 			  unsigned int index,
465 			  const struct sk_buff *skb,
466 			  unsigned int dataoff,
467 			  const struct tcphdr *tcph)
468 {
469 	struct net *net = nf_ct_net(ct);
470 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
471 	struct ip_ct_tcp_state *sender = &state->seen[dir];
472 	struct ip_ct_tcp_state *receiver = &state->seen[!dir];
473 	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
474 	__u32 seq, ack, sack, end, win, swin;
475 	u16 win_raw;
476 	s32 receiver_offset;
477 	bool res, in_recv_win;
478 
479 	/*
480 	 * Get the required data from the packet.
481 	 */
482 	seq = ntohl(tcph->seq);
483 	ack = sack = ntohl(tcph->ack_seq);
484 	win_raw = ntohs(tcph->window);
485 	win = win_raw;
486 	end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
487 
488 	if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
489 		tcp_sack(skb, dataoff, tcph, &sack);
490 
491 	/* Take into account NAT sequence number mangling */
492 	receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
493 	ack -= receiver_offset;
494 	sack -= receiver_offset;
495 
496 	pr_debug("tcp_in_window: START\n");
497 	pr_debug("tcp_in_window: ");
498 	nf_ct_dump_tuple(tuple);
499 	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
500 		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
501 	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
502 		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
503 		 sender->td_end, sender->td_maxend, sender->td_maxwin,
504 		 sender->td_scale,
505 		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
506 		 receiver->td_scale);
507 
508 	if (sender->td_maxwin == 0) {
509 		/*
510 		 * Initialize sender data.
511 		 */
512 		if (tcph->syn) {
513 			/*
514 			 * SYN-ACK in reply to a SYN
515 			 * or SYN from reply direction in simultaneous open.
516 			 */
517 			sender->td_end =
518 			sender->td_maxend = end;
519 			sender->td_maxwin = (win == 0 ? 1 : win);
520 
521 			tcp_options(skb, dataoff, tcph, sender);
522 			/*
523 			 * RFC 1323:
524 			 * Both sides must send the Window Scale option
525 			 * to enable window scaling in either direction.
526 			 */
527 			if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
528 			      && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
529 				sender->td_scale =
530 				receiver->td_scale = 0;
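			/* E.g. if the original SYN advertised wscale 7 but
			 * this reply carries no window scale option, both
			 * scales are cleared and plain 16-bit windows are
			 * tracked from here on (illustrative note).
			 */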
531 			if (!tcph->ack)
532 				/* Simultaneous open */
533 				return true;
534 		} else {
535 			/*
536 			 * We are in the middle of a connection,
537 			 * its history is lost for us.
538 			 * Let's try to use the data from the packet.
539 			 */
540 			sender->td_end = end;
541 			swin = win << sender->td_scale;
542 			sender->td_maxwin = (swin == 0 ? 1 : swin);
543 			sender->td_maxend = end + sender->td_maxwin;
544 			if (receiver->td_maxwin == 0) {
545 				/* We haven't seen traffic in the other
546 				 * direction yet but we have to tweak window
547 				 * tracking to pass III and IV until that
548 				 * happens.
549 				 */
550 				receiver->td_end = receiver->td_maxend = sack;
551 			} else if (sack == receiver->td_end + 1) {
552 				/* Likely a reply to a keepalive.
553 				 * Needed for III.
554 				 */
555 				receiver->td_end++;
556 			}
557 
558 		}
559 	} else if (((state->state == TCP_CONNTRACK_SYN_SENT
560 		     && dir == IP_CT_DIR_ORIGINAL)
561 		   || (state->state == TCP_CONNTRACK_SYN_RECV
562 		     && dir == IP_CT_DIR_REPLY))
563 		   && after(end, sender->td_end)) {
564 		/*
565 		 * RFC 793: "if a TCP is reinitialized ... then it need
566 		 * not wait at all; it must only be sure to use sequence
567 		 * numbers larger than those recently used."
568 		 */
569 		sender->td_end =
570 		sender->td_maxend = end;
571 		sender->td_maxwin = (win == 0 ? 1 : win);
572 
573 		tcp_options(skb, dataoff, tcph, sender);
574 	}
575 
576 	if (!(tcph->ack)) {
577 		/*
578 		 * If there is no ACK, just pretend it was set and OK.
579 		 */
580 		ack = sack = receiver->td_end;
581 	} else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
582 		    (TCP_FLAG_ACK|TCP_FLAG_RST))
583 		   && (ack == 0)) {
584 		/*
585		 * Broken TCP stacks that set the ACK flag in RST packets
586		 * but with a zero ack value.
587 		 */
588 		ack = sack = receiver->td_end;
589 	}
590 
591 	if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)
592 		/*
593 		 * RST sent answering SYN.
594 		 */
595 		seq = end = sender->td_end;
596 
597 	pr_debug("tcp_in_window: ");
598 	nf_ct_dump_tuple(tuple);
599 	pr_debug("seq=%u ack=%u+(%d) sack=%u+(%d) win=%u end=%u\n",
600 		 seq, ack, receiver_offset, sack, receiver_offset, win, end);
601 	pr_debug("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
602 		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
603 		 sender->td_end, sender->td_maxend, sender->td_maxwin,
604 		 sender->td_scale,
605 		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
606 		 receiver->td_scale);
607 
608 	/* Is the ending sequence in the receive window (if available)? */
609 	in_recv_win = !receiver->td_maxwin ||
610 		      after(end, sender->td_end - receiver->td_maxwin - 1);
611 
612 	pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
613 		 before(seq, sender->td_maxend + 1),
614 		 (in_recv_win ? 1 : 0),
615 		 before(sack, receiver->td_end + 1),
616 		 after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
617 
618 	if (before(seq, sender->td_maxend + 1) &&
619 	    in_recv_win &&
620 	    before(sack, receiver->td_end + 1) &&
621 	    after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
622 		/*
623 		 * Take into account window scaling (RFC 1323).
624 		 */
625 		if (!tcph->syn)
626 			win <<= sender->td_scale;
627 
628 		/*
629 		 * Update sender data.
630 		 */
631 		swin = win + (sack - ack);
632 		if (sender->td_maxwin < swin)
633 			sender->td_maxwin = swin;
634 		if (after(end, sender->td_end)) {
635 			sender->td_end = end;
636 			sender->flags |= IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
637 		}
638 		if (tcph->ack) {
639 			if (!(sender->flags & IP_CT_TCP_FLAG_MAXACK_SET)) {
640 				sender->td_maxack = ack;
641 				sender->flags |= IP_CT_TCP_FLAG_MAXACK_SET;
642 			} else if (after(ack, sender->td_maxack))
643 				sender->td_maxack = ack;
644 		}
645 
646 		/*
647 		 * Update receiver data.
648 		 */
649 		if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
650 			receiver->td_maxwin += end - sender->td_maxend;
651 		if (after(sack + win, receiver->td_maxend - 1)) {
652 			receiver->td_maxend = sack + win;
653 			if (win == 0)
654 				receiver->td_maxend++;
655 		}
656 		if (ack == receiver->td_end)
657 			receiver->flags &= ~IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED;
658 
659 		/*
660 		 * Check retransmissions.
661 		 */
662 		if (index == TCP_ACK_SET) {
663 			if (state->last_dir == dir
664 			    && state->last_seq == seq
665 			    && state->last_ack == ack
666 			    && state->last_end == end
667 			    && state->last_win == win_raw)
668 				state->retrans++;
669 			else {
670 				state->last_dir = dir;
671 				state->last_seq = seq;
672 				state->last_ack = ack;
673 				state->last_end = end;
674 				state->last_win = win_raw;
675 				state->retrans = 0;
676 			}
677 		}
678 		res = true;
679 	} else {
680 		res = false;
681 		if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
682 		    tn->tcp_be_liberal)
683 			res = true;
684 		if (!res) {
685 			nf_ct_l4proto_log_invalid(skb, ct,
686 			"%s",
687 			before(seq, sender->td_maxend + 1) ?
688 			in_recv_win ?
689 			before(sack, receiver->td_end + 1) ?
690 			after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
691 			: "ACK is under the lower bound (possible overly delayed ACK)"
692 			: "ACK is over the upper bound (ACKed data not seen yet)"
693 			: "SEQ is under the lower bound (already ACKed data retransmitted)"
694 			: "SEQ is over the upper bound (over the window of the receiver)");
695 		}
696 	}
697 
698 	pr_debug("tcp_in_window: res=%u sender end=%u maxend=%u maxwin=%u "
699 		 "receiver end=%u maxend=%u maxwin=%u\n",
700 		 res, sender->td_end, sender->td_maxend, sender->td_maxwin,
701 		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
702 
703 	return res;
704 }
705 
706 /* table of valid flag combinations - PUSH, ECE and CWR are always valid */
707 static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
708 				 TCPHDR_URG) + 1] =
709 {
710 	[TCPHDR_SYN]				= 1,
711 	[TCPHDR_SYN|TCPHDR_URG]			= 1,
712 	[TCPHDR_SYN|TCPHDR_ACK]			= 1,
713 	[TCPHDR_RST]				= 1,
714 	[TCPHDR_RST|TCPHDR_ACK]			= 1,
715 	[TCPHDR_FIN|TCPHDR_ACK]			= 1,
716 	[TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG]	= 1,
717 	[TCPHDR_ACK]				= 1,
718 	[TCPHDR_ACK|TCPHDR_URG]			= 1,
719 };
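/* For illustration: tcp_error() below masks off PSH/ECE/CWR before the
 * lookup, so e.g. SYN|PSH indexes entry [TCPHDR_SYN] and is accepted,
 * whereas SYN|FIN indexes an unset entry and is logged as an invalid
 * flag combination.
 */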
720 
721 static void tcp_error_log(const struct sk_buff *skb,
722 			  const struct nf_hook_state *state,
723 			  const char *msg)
724 {
725 	nf_l4proto_log_invalid(skb, state->net, state->pf, IPPROTO_TCP, "%s", msg);
726 }
727 
728 /* Protect conntrack against broken packets. Code taken from ipt_unclean.c.  */
729 static bool tcp_error(const struct tcphdr *th,
730 		      struct sk_buff *skb,
731 		      unsigned int dataoff,
732 		      const struct nf_hook_state *state)
733 {
734 	unsigned int tcplen = skb->len - dataoff;
735 	u8 tcpflags;
736 
737 	/* Not whole TCP header or malformed packet */
738 	if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
739 		tcp_error_log(skb, state, "truncated packet");
740 		return true;
741 	}
742 
743 	/* Checksum invalid? Ignore.
744 	 * We skip checking packets on the outgoing path
745 	 * because the checksum is assumed to be correct.
746 	 */
747 	/* FIXME: Source route IP option packets --RR */
748 	if (state->net->ct.sysctl_checksum &&
749 	    state->hook == NF_INET_PRE_ROUTING &&
750 	    nf_checksum(skb, state->hook, dataoff, IPPROTO_TCP, state->pf)) {
751 		tcp_error_log(skb, state, "bad checksum");
752 		return true;
753 	}
754 
755 	/* Check TCP flags. */
756 	tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
757 	if (!tcp_valid_flags[tcpflags]) {
758 		tcp_error_log(skb, state, "invalid tcp flag combination");
759 		return true;
760 	}
761 
762 	return false;
763 }
764 
765 static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
766 			     unsigned int dataoff,
767 			     const struct tcphdr *th)
768 {
769 	enum tcp_conntrack new_state;
770 	struct net *net = nf_ct_net(ct);
771 	const struct nf_tcp_net *tn = nf_tcp_pernet(net);
772 	const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
773 	const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
774 
775 	/* Don't need lock here: this conntrack not in circulation yet */
776 	new_state = tcp_conntracks[0][get_conntrack_index(th)][TCP_CONNTRACK_NONE];
777 
778 	/* Invalid: delete conntrack */
779 	if (new_state >= TCP_CONNTRACK_MAX) {
780 		pr_debug("nf_ct_tcp: invalid new deleting.\n");
781 		return false;
782 	}
783 
784 	if (new_state == TCP_CONNTRACK_SYN_SENT) {
785 		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
786 		/* SYN packet */
787 		ct->proto.tcp.seen[0].td_end =
788 			segment_seq_plus_len(ntohl(th->seq), skb->len,
789 					     dataoff, th);
790 		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
791 		if (ct->proto.tcp.seen[0].td_maxwin == 0)
792 			ct->proto.tcp.seen[0].td_maxwin = 1;
793 		ct->proto.tcp.seen[0].td_maxend =
794 			ct->proto.tcp.seen[0].td_end;
795 
796 		tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
797 	} else if (tn->tcp_loose == 0) {
798 		/* Don't try to pick up connections. */
799 		return false;
800 	} else {
801 		memset(&ct->proto.tcp, 0, sizeof(ct->proto.tcp));
802 		/*
803 		 * We are in the middle of a connection,
804 		 * its history is lost for us.
805 		 * Let's try to use the data from the packet.
806 		 */
807 		ct->proto.tcp.seen[0].td_end =
808 			segment_seq_plus_len(ntohl(th->seq), skb->len,
809 					     dataoff, th);
810 		ct->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
811 		if (ct->proto.tcp.seen[0].td_maxwin == 0)
812 			ct->proto.tcp.seen[0].td_maxwin = 1;
813 		ct->proto.tcp.seen[0].td_maxend =
814 			ct->proto.tcp.seen[0].td_end +
815 			ct->proto.tcp.seen[0].td_maxwin;
816 
817 		/* We assume SACK and liberal window checking to handle
818 		 * window scaling */
819 		ct->proto.tcp.seen[0].flags =
820 		ct->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
821 					      IP_CT_TCP_FLAG_BE_LIBERAL;
822 	}
823 
824 	/* tcp_packet will set them */
825 	ct->proto.tcp.last_index = TCP_NONE_SET;
826 
827 	pr_debug("%s: sender end=%u maxend=%u maxwin=%u scale=%i "
828 		 "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
829 		 __func__,
830 		 sender->td_end, sender->td_maxend, sender->td_maxwin,
831 		 sender->td_scale,
832 		 receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
833 		 receiver->td_scale);
834 	return true;
835 }
836 
837 static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
838 {
839 	return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
840 	       test_bit(IPS_ASSURED_BIT, &ct->status);
841 }
842 
843 static void nf_ct_tcp_state_reset(struct ip_ct_tcp_state *state)
844 {
845 	state->td_end		= 0;
846 	state->td_maxend	= 0;
847 	state->td_maxwin	= 0;
848 	state->td_maxack	= 0;
849 	state->td_scale		= 0;
850 	state->flags		&= IP_CT_TCP_FLAG_BE_LIBERAL;
851 }
852 
853 /* Returns verdict for packet, or -1 for invalid. */
854 int nf_conntrack_tcp_packet(struct nf_conn *ct,
855 			    struct sk_buff *skb,
856 			    unsigned int dataoff,
857 			    enum ip_conntrack_info ctinfo,
858 			    const struct nf_hook_state *state)
859 {
860 	struct net *net = nf_ct_net(ct);
861 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
862 	struct nf_conntrack_tuple *tuple;
863 	enum tcp_conntrack new_state, old_state;
864 	unsigned int index, *timeouts;
865 	enum ip_conntrack_dir dir;
866 	const struct tcphdr *th;
867 	struct tcphdr _tcph;
868 	unsigned long timeout;
869 
870 	th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
871 	if (th == NULL)
872 		return -NF_ACCEPT;
873 
874 	if (tcp_error(th, skb, dataoff, state))
875 		return -NF_ACCEPT;
876 
877 	if (!nf_ct_is_confirmed(ct) && !tcp_new(ct, skb, dataoff, th))
878 		return -NF_ACCEPT;
879 
880 	spin_lock_bh(&ct->lock);
881 	old_state = ct->proto.tcp.state;
882 	dir = CTINFO2DIR(ctinfo);
883 	index = get_conntrack_index(th);
884 	new_state = tcp_conntracks[dir][index][old_state];
885 	tuple = &ct->tuplehash[dir].tuple;
886 
887 	switch (new_state) {
888 	case TCP_CONNTRACK_SYN_SENT:
889 		if (old_state < TCP_CONNTRACK_TIME_WAIT)
890 			break;
891 		/* RFC 1122: "When a connection is closed actively,
892 		 * it MUST linger in TIME-WAIT state for a time 2xMSL
893 		 * (Maximum Segment Lifetime). However, it MAY accept
894 		 * a new SYN from the remote TCP to reopen the connection
895 		 * directly from TIME-WAIT state, if..."
896 		 * We ignore the conditions because we are in the
897 		 * TIME-WAIT state anyway.
898 		 *
899 		 * Handle aborted connections: we and the server
900 		 * think there is an existing connection but the client
901 		 * aborts it and starts a new one.
902 		 */
903 		if (((ct->proto.tcp.seen[dir].flags
904 		      | ct->proto.tcp.seen[!dir].flags)
905 		     & IP_CT_TCP_FLAG_CLOSE_INIT)
906 		    || (ct->proto.tcp.last_dir == dir
907 		        && ct->proto.tcp.last_index == TCP_RST_SET)) {
908 			/* Attempt to reopen a closed/aborted connection.
909 			 * Delete this connection and look up again. */
910 			spin_unlock_bh(&ct->lock);
911 
912 			/* Only repeat if we can actually remove the timer.
913 			 * Destruction may already be in progress in process
914 			 * context and we must give it a chance to terminate.
915 			 */
916 			if (nf_ct_kill(ct))
917 				return -NF_REPEAT;
918 			return NF_DROP;
919 		}
920 		fallthrough;
921 	case TCP_CONNTRACK_IGNORE:
922 		/* Ignored packets:
923 		 *
924 		 * Our connection entry may be out of sync, so ignore
925 		 * packets which may signal the real connection between
926 		 * the client and the server.
927 		 *
928 		 * a) SYN in ORIGINAL
929 		 * b) SYN/ACK in REPLY
930 		 * c) ACK in reply direction after initial SYN in original.
931 		 *
932 		 * If the ignored packet is invalid, the receiver will send
933 		 * a RST we'll catch below.
934 		 */
935 		if (index == TCP_SYNACK_SET
936 		    && ct->proto.tcp.last_index == TCP_SYN_SET
937 		    && ct->proto.tcp.last_dir != dir
938 		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
939 			/* b) This SYN/ACK acknowledges a SYN that we earlier
940 			 * ignored as invalid. This means that the client and
941 			 * the server are both in sync, while the firewall is
942 			 * not. We get in sync from the previously annotated
943 			 * values.
944 			 */
945 			old_state = TCP_CONNTRACK_SYN_SENT;
946 			new_state = TCP_CONNTRACK_SYN_RECV;
947 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_end =
948 				ct->proto.tcp.last_end;
949 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxend =
950 				ct->proto.tcp.last_end;
951 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_maxwin =
952 				ct->proto.tcp.last_win == 0 ?
953 					1 : ct->proto.tcp.last_win;
954 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
955 				ct->proto.tcp.last_wscale;
956 			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
957 			ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
958 				ct->proto.tcp.last_flags;
959 			nf_ct_tcp_state_reset(&ct->proto.tcp.seen[dir]);
960 			break;
961 		}
962 		ct->proto.tcp.last_index = index;
963 		ct->proto.tcp.last_dir = dir;
964 		ct->proto.tcp.last_seq = ntohl(th->seq);
965 		ct->proto.tcp.last_end =
966 		    segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
967 		ct->proto.tcp.last_win = ntohs(th->window);
968 
969 		/* a) This is a SYN in ORIGINAL. The client and the server
970 		 * may be in sync but we are not. In that case, we annotate
971 		 * the TCP options and let the packet go through. If it is a
972 		 * valid SYN packet, the server will reply with a SYN/ACK, and
973 		 * then we'll get in sync. Otherwise, the server potentially
974 		 * responds with a challenge ACK if implementing RFC5961.
975 		 */
976 		if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
977 			struct ip_ct_tcp_state seen = {};
978 
979 			ct->proto.tcp.last_flags =
980 			ct->proto.tcp.last_wscale = 0;
981 			tcp_options(skb, dataoff, th, &seen);
982 			if (seen.flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
983 				ct->proto.tcp.last_flags |=
984 					IP_CT_TCP_FLAG_WINDOW_SCALE;
985 				ct->proto.tcp.last_wscale = seen.td_scale;
986 			}
987 			if (seen.flags & IP_CT_TCP_FLAG_SACK_PERM) {
988 				ct->proto.tcp.last_flags |=
989 					IP_CT_TCP_FLAG_SACK_PERM;
990 			}
991			/* Mark the potential for an RFC5961 challenge ACK;
992			 * this poses a special problem for the LAST_ACK state,
993			 * as the ACK is interpreted as ACKing the last FIN.
994 			 */
995 			if (old_state == TCP_CONNTRACK_LAST_ACK)
996 				ct->proto.tcp.last_flags |=
997 					IP_CT_EXP_CHALLENGE_ACK;
998 		}
999 		spin_unlock_bh(&ct->lock);
1000 		nf_ct_l4proto_log_invalid(skb, ct, "invalid packet ignored in "
1001 					  "state %s ", tcp_conntrack_names[old_state]);
1002 		return NF_ACCEPT;
1003 	case TCP_CONNTRACK_MAX:
1004 		/* Special case for SYN proxy: when the SYN to the server or
1005 		 * the SYN/ACK from the server is lost, the client may transmit
1006 		 * a keep-alive packet while in SYN_SENT state. This needs to
1007 		 * be associated with the original conntrack entry in order to
1008 		 * generate a new SYN with the correct sequence number.
1009 		 */
1010 		if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
1011 		    index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
1012 		    ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
1013 		    ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
1014 			pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
1015 			spin_unlock_bh(&ct->lock);
1016 			return NF_ACCEPT;
1017 		}
1018 
1019 		/* Invalid packet */
1020 		pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
1021 			 dir, get_conntrack_index(th), old_state);
1022 		spin_unlock_bh(&ct->lock);
1023 		nf_ct_l4proto_log_invalid(skb, ct, "invalid state");
1024 		return -NF_ACCEPT;
1025 	case TCP_CONNTRACK_TIME_WAIT:
1026		/* RFC5961 compliance causes the stack to send a "challenge ACK",
1027		 * e.g. in response to spurious SYNs.  Conntrack MUST
1028		 * not believe this ACK is acking the last FIN.
1029 		 */
1030 		if (old_state == TCP_CONNTRACK_LAST_ACK &&
1031 		    index == TCP_ACK_SET &&
1032 		    ct->proto.tcp.last_dir != dir &&
1033 		    ct->proto.tcp.last_index == TCP_SYN_SET &&
1034 		    (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
1035 			/* Detected RFC5961 challenge ACK */
1036 			ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
1037 			spin_unlock_bh(&ct->lock);
1038 			nf_ct_l4proto_log_invalid(skb, ct, "challenge-ack ignored");
1039 			return NF_ACCEPT; /* Don't change state */
1040 		}
1041 		break;
1042 	case TCP_CONNTRACK_SYN_SENT2:
1043 		/* tcp_conntracks table is not smart enough to handle
1044 		 * simultaneous open.
1045 		 */
1046 		ct->proto.tcp.last_flags |= IP_CT_TCP_SIMULTANEOUS_OPEN;
1047 		break;
1048 	case TCP_CONNTRACK_SYN_RECV:
1049 		if (dir == IP_CT_DIR_REPLY && index == TCP_ACK_SET &&
1050 		    ct->proto.tcp.last_flags & IP_CT_TCP_SIMULTANEOUS_OPEN)
1051 			new_state = TCP_CONNTRACK_ESTABLISHED;
1052 		break;
1053 	case TCP_CONNTRACK_CLOSE:
1054 		if (index != TCP_RST_SET)
1055 			break;
1056 
1057 		if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
1058 			u32 seq = ntohl(th->seq);
1059 
1060 			if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
1061 				/* Invalid RST  */
1062 				spin_unlock_bh(&ct->lock);
1063 				nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
1064 				return -NF_ACCEPT;
1065 			}
1066 
1067 			if (!nf_conntrack_tcp_established(ct) ||
1068 			    seq == ct->proto.tcp.seen[!dir].td_maxack)
1069 				break;
1070 
1071 			/* Check if rst is part of train, such as
1072 			 *   foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
1073 			 *   foo:80 > bar:4379: R, 235946602:235946602(0)  ack 42
1074 			 */
1075 			if (ct->proto.tcp.last_index == TCP_ACK_SET &&
1076 			    ct->proto.tcp.last_dir == dir &&
1077 			    seq == ct->proto.tcp.last_end)
1078 				break;
1079 
1080 			/* ... RST sequence number doesn't match exactly, keep
1081 			 * established state to allow a possible challenge ACK.
1082 			 */
1083 			new_state = old_state;
1084 		}
1085 		if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
1086 			 && ct->proto.tcp.last_index == TCP_SYN_SET)
1087 			|| (!test_bit(IPS_ASSURED_BIT, &ct->status)
1088 			    && ct->proto.tcp.last_index == TCP_ACK_SET))
1089 		    && ntohl(th->ack_seq) == ct->proto.tcp.last_end) {
1090 			/* RST sent to invalid SYN or ACK we had let through
1091 			 * at a) and c) above:
1092 			 *
1093 			 * a) SYN was in window then
1094 			 * c) we hold a half-open connection.
1095 			 *
1096 			 * Delete our connection entry.
1097 			 * We skip window checking, because packet might ACK
1098 			 * segments we ignored. */
1099 			goto in_window;
1100 		}
1101 		break;
1102 	default:
1103 		/* Keep compilers happy. */
1104 		break;
1105 	}
1106 
1107 	if (!tcp_in_window(ct, &ct->proto.tcp, dir, index,
1108 			   skb, dataoff, th)) {
1109 		spin_unlock_bh(&ct->lock);
1110 		return -NF_ACCEPT;
1111 	}
1112      in_window:
1113 	/* From now on we have got in-window packets */
1114 	ct->proto.tcp.last_index = index;
1115 	ct->proto.tcp.last_dir = dir;
1116 
1117 	pr_debug("tcp_conntracks: ");
1118 	nf_ct_dump_tuple(tuple);
1119 	pr_debug("syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
1120 		 (th->syn ? 1 : 0), (th->ack ? 1 : 0),
1121 		 (th->fin ? 1 : 0), (th->rst ? 1 : 0),
1122 		 old_state, new_state);
1123 
1124 	ct->proto.tcp.state = new_state;
1125 	if (old_state != new_state
1126 	    && new_state == TCP_CONNTRACK_FIN_WAIT)
1127 		ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
1128 
1129 	timeouts = nf_ct_timeout_lookup(ct);
1130 	if (!timeouts)
1131 		timeouts = tn->timeouts;
1132 
1133 	if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
1134 	    timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1135 		timeout = timeouts[TCP_CONNTRACK_RETRANS];
1136 	else if (unlikely(index == TCP_RST_SET))
1137 		timeout = timeouts[TCP_CONNTRACK_CLOSE];
1138 	else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
1139 		 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
1140 		 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
1141 		timeout = timeouts[TCP_CONNTRACK_UNACK];
1142 	else if (ct->proto.tcp.last_win == 0 &&
1143 		 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
1144 		timeout = timeouts[TCP_CONNTRACK_RETRANS];
1145 	else
1146 		timeout = timeouts[new_state];
1147 	spin_unlock_bh(&ct->lock);
1148 
1149 	if (new_state != old_state)
1150 		nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
1151 
1152 	if (!test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1153		/* If the only reply is a RST, we can consider ourselves not to
1154 		   have an established connection: this is a fairly common
1155 		   problem case, so we can delete the conntrack
1156 		   immediately.  --RR */
1157 		if (th->rst) {
1158 			nf_ct_kill_acct(ct, ctinfo, skb);
1159 			return NF_ACCEPT;
1160 		}
1161 
1162 		if (index == TCP_SYN_SET && old_state == TCP_CONNTRACK_SYN_SENT) {
1163 			/* do not renew timeout on SYN retransmit.
1164 			 *
1165 			 * Else port reuse by client or NAT middlebox can keep
1166 			 * entry alive indefinitely (including nat info).
1167 			 */
1168 			return NF_ACCEPT;
1169 		}
1170 
1171 		/* ESTABLISHED without SEEN_REPLY, i.e. mid-connection
1172 		 * pickup with loose=1. Avoid large ESTABLISHED timeout.
1173 		 */
1174 		if (new_state == TCP_CONNTRACK_ESTABLISHED &&
1175 		    timeout > timeouts[TCP_CONNTRACK_UNACK])
1176 			timeout = timeouts[TCP_CONNTRACK_UNACK];
1177 	} else if (!test_bit(IPS_ASSURED_BIT, &ct->status)
1178 		   && (old_state == TCP_CONNTRACK_SYN_RECV
1179 		       || old_state == TCP_CONNTRACK_ESTABLISHED)
1180 		   && new_state == TCP_CONNTRACK_ESTABLISHED) {
1181 		/* Set ASSURED if we see valid ack in ESTABLISHED
1182 		   after SYN_RECV or a valid answer for a picked up
1183 		   connection. */
1184 		set_bit(IPS_ASSURED_BIT, &ct->status);
1185 		nf_conntrack_event_cache(IPCT_ASSURED, ct);
1186 	}
1187 	nf_ct_refresh_acct(ct, ctinfo, skb, timeout);
1188 
1189 	return NF_ACCEPT;
1190 }
1191 
1192 static bool tcp_can_early_drop(const struct nf_conn *ct)
1193 {
1194 	switch (ct->proto.tcp.state) {
1195 	case TCP_CONNTRACK_FIN_WAIT:
1196 	case TCP_CONNTRACK_LAST_ACK:
1197 	case TCP_CONNTRACK_TIME_WAIT:
1198 	case TCP_CONNTRACK_CLOSE:
1199 	case TCP_CONNTRACK_CLOSE_WAIT:
1200 		return true;
1201 	default:
1202 		break;
1203 	}
1204 
1205 	return false;
1206 }
1207 
1208 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1209 
1210 #include <linux/netfilter/nfnetlink.h>
1211 #include <linux/netfilter/nfnetlink_conntrack.h>
1212 
1213 static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1214 			 struct nf_conn *ct)
1215 {
1216 	struct nlattr *nest_parms;
1217 	struct nf_ct_tcp_flags tmp = {};
1218 
1219 	spin_lock_bh(&ct->lock);
1220 	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_TCP);
1221 	if (!nest_parms)
1222 		goto nla_put_failure;
1223 
1224 	if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
1225 	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1226 		       ct->proto.tcp.seen[0].td_scale) ||
1227 	    nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1228 		       ct->proto.tcp.seen[1].td_scale))
1229 		goto nla_put_failure;
1230 
1231 	tmp.flags = ct->proto.tcp.seen[0].flags;
1232 	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1233 		    sizeof(struct nf_ct_tcp_flags), &tmp))
1234 		goto nla_put_failure;
1235 
1236 	tmp.flags = ct->proto.tcp.seen[1].flags;
1237 	if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1238 		    sizeof(struct nf_ct_tcp_flags), &tmp))
1239 		goto nla_put_failure;
1240 	spin_unlock_bh(&ct->lock);
1241 
1242 	nla_nest_end(skb, nest_parms);
1243 
1244 	return 0;
1245 
1246 nla_put_failure:
1247 	spin_unlock_bh(&ct->lock);
1248 	return -1;
1249 }
1250 
1251 static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
1252 	[CTA_PROTOINFO_TCP_STATE]	    = { .type = NLA_U8 },
1253 	[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] = { .type = NLA_U8 },
1254 	[CTA_PROTOINFO_TCP_WSCALE_REPLY]    = { .type = NLA_U8 },
1255 	[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]  = { .len = sizeof(struct nf_ct_tcp_flags) },
1256 	[CTA_PROTOINFO_TCP_FLAGS_REPLY]	    = { .len = sizeof(struct nf_ct_tcp_flags) },
1257 };
1258 
1259 #define TCP_NLATTR_SIZE	( \
1260 	NLA_ALIGN(NLA_HDRLEN + 1) + \
1261 	NLA_ALIGN(NLA_HDRLEN + 1) + \
1262 	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
1263 	NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
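/* Worked example (illustrative, assuming NLA_HDRLEN == 4, 4-byte netlink
 * attribute alignment and sizeof(struct nf_ct_tcp_flags) == 2): each term
 * above rounds up to 8 bytes, so TCP_NLATTR_SIZE evaluates to
 * 8 + 8 + 8 + 8 = 32 bytes.
 */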
1264 
1265 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
1266 {
1267 	struct nlattr *pattr = cda[CTA_PROTOINFO_TCP];
1268 	struct nlattr *tb[CTA_PROTOINFO_TCP_MAX+1];
1269 	int err;
1270 
1271	/* updates might not contain anything about the private
1272	 * protocol info; in that case skip the parsing */
1273 	if (!pattr)
1274 		return 0;
1275 
1276 	err = nla_parse_nested_deprecated(tb, CTA_PROTOINFO_TCP_MAX, pattr,
1277 					  tcp_nla_policy, NULL);
1278 	if (err < 0)
1279 		return err;
1280 
1281 	if (tb[CTA_PROTOINFO_TCP_STATE] &&
1282 	    nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]) >= TCP_CONNTRACK_MAX)
1283 		return -EINVAL;
1284 
1285 	spin_lock_bh(&ct->lock);
1286 	if (tb[CTA_PROTOINFO_TCP_STATE])
1287 		ct->proto.tcp.state = nla_get_u8(tb[CTA_PROTOINFO_TCP_STATE]);
1288 
1289 	if (tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]) {
1290 		struct nf_ct_tcp_flags *attr =
1291 			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]);
1292 		ct->proto.tcp.seen[0].flags &= ~attr->mask;
1293 		ct->proto.tcp.seen[0].flags |= attr->flags & attr->mask;
1294 	}
1295 
1296 	if (tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]) {
1297 		struct nf_ct_tcp_flags *attr =
1298 			nla_data(tb[CTA_PROTOINFO_TCP_FLAGS_REPLY]);
1299 		ct->proto.tcp.seen[1].flags &= ~attr->mask;
1300 		ct->proto.tcp.seen[1].flags |= attr->flags & attr->mask;
1301 	}
1302 
1303 	if (tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL] &&
1304 	    tb[CTA_PROTOINFO_TCP_WSCALE_REPLY] &&
1305 	    ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
1306 	    ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_WINDOW_SCALE) {
1307 		ct->proto.tcp.seen[0].td_scale =
1308 			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
1309 		ct->proto.tcp.seen[1].td_scale =
1310 			nla_get_u8(tb[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
1311 	}
1312 	spin_unlock_bh(&ct->lock);
1313 
1314 	return 0;
1315 }
1316 
1317 static unsigned int tcp_nlattr_tuple_size(void)
1318 {
1319 	static unsigned int size __read_mostly;
1320 
1321 	if (!size)
1322 		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1323 
1324 	return size;
1325 }
1326 #endif
1327 
1328 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1329 
1330 #include <linux/netfilter/nfnetlink.h>
1331 #include <linux/netfilter/nfnetlink_cttimeout.h>
1332 
1333 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
1334 				     struct net *net, void *data)
1335 {
1336 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
1337 	unsigned int *timeouts = data;
1338 	int i;
1339 
1340 	if (!timeouts)
1341 		timeouts = tn->timeouts;
1342 	/* set default TCP timeouts. */
1343 	for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
1344 		timeouts[i] = tn->timeouts[i];
1345 
1346 	if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
1347 		timeouts[TCP_CONNTRACK_SYN_SENT] =
1348 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
1349 	}
1350 
1351 	if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
1352 		timeouts[TCP_CONNTRACK_SYN_RECV] =
1353 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
1354 	}
1355 	if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
1356 		timeouts[TCP_CONNTRACK_ESTABLISHED] =
1357 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
1358 	}
1359 	if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
1360 		timeouts[TCP_CONNTRACK_FIN_WAIT] =
1361 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
1362 	}
1363 	if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
1364 		timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
1365 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
1366 	}
1367 	if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
1368 		timeouts[TCP_CONNTRACK_LAST_ACK] =
1369 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
1370 	}
1371 	if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
1372 		timeouts[TCP_CONNTRACK_TIME_WAIT] =
1373 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
1374 	}
1375 	if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
1376 		timeouts[TCP_CONNTRACK_CLOSE] =
1377 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
1378 	}
1379 	if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
1380 		timeouts[TCP_CONNTRACK_SYN_SENT2] =
1381 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
1382 	}
1383 	if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
1384 		timeouts[TCP_CONNTRACK_RETRANS] =
1385 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
1386 	}
1387 	if (tb[CTA_TIMEOUT_TCP_UNACK]) {
1388 		timeouts[TCP_CONNTRACK_UNACK] =
1389 			ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
1390 	}
1391 
1392 	timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
1393 	return 0;
1394 }
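/* For illustration: a CTA_TIMEOUT_TCP_ESTABLISHED attribute carrying the
 * big-endian value 3600 (seconds) ends up as
 *   timeouts[TCP_CONNTRACK_ESTABLISHED] = 3600 * HZ
 * jiffies, matching the unit used by the tcp_timeouts[] defaults above.
 */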
1395 
1396 static int
1397 tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1398 {
1399 	const unsigned int *timeouts = data;
1400 
1401 	if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1402 			htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1403 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1404 			 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1405 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1406 			 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1407 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1408 			 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1409 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1410 			 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1411 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1412 			 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1413 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1414 			 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1415 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1416 			 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1417 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1418 			 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1419 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1420 			 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1421 	    nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1422 			 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1423 		goto nla_put_failure;
1424 	return 0;
1425 
1426 nla_put_failure:
1427 	return -ENOSPC;
1428 }
1429 
1430 static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
1431 	[CTA_TIMEOUT_TCP_SYN_SENT]	= { .type = NLA_U32 },
1432 	[CTA_TIMEOUT_TCP_SYN_RECV]	= { .type = NLA_U32 },
1433 	[CTA_TIMEOUT_TCP_ESTABLISHED]	= { .type = NLA_U32 },
1434 	[CTA_TIMEOUT_TCP_FIN_WAIT]	= { .type = NLA_U32 },
1435 	[CTA_TIMEOUT_TCP_CLOSE_WAIT]	= { .type = NLA_U32 },
1436 	[CTA_TIMEOUT_TCP_LAST_ACK]	= { .type = NLA_U32 },
1437 	[CTA_TIMEOUT_TCP_TIME_WAIT]	= { .type = NLA_U32 },
1438 	[CTA_TIMEOUT_TCP_CLOSE]		= { .type = NLA_U32 },
1439 	[CTA_TIMEOUT_TCP_SYN_SENT2]	= { .type = NLA_U32 },
1440 	[CTA_TIMEOUT_TCP_RETRANS]	= { .type = NLA_U32 },
1441 	[CTA_TIMEOUT_TCP_UNACK]		= { .type = NLA_U32 },
1442 };
1443 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1444 
1445 void nf_conntrack_tcp_init_net(struct net *net)
1446 {
1447 	struct nf_tcp_net *tn = nf_tcp_pernet(net);
1448 	int i;
1449 
1450 	for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
1451 		tn->timeouts[i] = tcp_timeouts[i];
1452 
1453	/* timeouts[0] is unused; make it the same as SYN_SENT so
1454	 * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
1455 	 */
1456 	tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
1457 	tn->tcp_loose = nf_ct_tcp_loose;
1458 	tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
1459 	tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
1460 }
1461 
1462 const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp =
1463 {
1464 	.l4proto 		= IPPROTO_TCP,
1465 #ifdef CONFIG_NF_CONNTRACK_PROCFS
1466 	.print_conntrack 	= tcp_print_conntrack,
1467 #endif
1468 	.can_early_drop		= tcp_can_early_drop,
1469 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1470 	.to_nlattr		= tcp_to_nlattr,
1471 	.from_nlattr		= nlattr_to_tcp,
1472 	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
1473 	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
1474 	.nlattr_tuple_size	= tcp_nlattr_tuple_size,
1475 	.nlattr_size		= TCP_NLATTR_SIZE,
1476 	.nla_policy		= nf_ct_port_nla_policy,
1477 #endif
1478 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
1479 	.ctnl_timeout		= {
1480 		.nlattr_to_obj	= tcp_timeout_nlattr_to_obj,
1481 		.obj_to_nlattr	= tcp_timeout_obj_to_nlattr,
1482 		.nlattr_max	= CTA_TIMEOUT_TCP_MAX,
1483 		.obj_size	= sizeof(unsigned int) *
1484 					TCP_CONNTRACK_TIMEOUT_MAX,
1485 		.nla_policy	= tcp_timeout_nla_policy,
1486 	},
1487 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
1488 };
1489