/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* Probing interval, defaults to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when actively opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to 63secs
				 * of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passively opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min; the new value is 60sec,
				  * to combine the FIN-WAIT-2 timeout with
				  * the TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
					                 * for local resources.
					                 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_EXP_FASTOPEN_BASE  4
#define TCPOLEN_EXP_SMC_BASE   6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
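
/* Worked example of the 40-byte budget (MAX_TCP_OPTION_SPACE): a typical
 * SYN carries MSS (4) + window scale (4, aligned) + timestamps (12,
 * aligned) = 20 bytes, with SACK_PERM (2) folded into the two bytes that
 * would otherwise pad the timestamp option.  On an established flow with
 * timestamps, 12 + TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK
 * (8) <= 40 leaves room for at most n = 3 SACK blocks.
 */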

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)
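
/* Example of why the signed cast works across wraparound: with
 * seq1 = 0xfffffff0 and seq2 = 0x00000010, seq1 - seq2 = 0xffffffe0,
 * which as __s32 is -32, so before(0xfffffff0, 0x10) correctly reports
 * the pre-wrap sequence number as "before" the post-wrap one.
 */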

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    const struct tcp_request_sock_ops *af_ops,
					    struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
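
/* Worked example: with HZ=1000, TCP_SYNCOOKIE_PERIOD is 60000 jiffies, so
 * the counter ticks once a minute; a cookie is rejected once the counter
 * has advanced by MAX_SYNCOOKIE_AGE (2) ticks, so its lifetime falls
 * between 60 and 120 seconds depending on where in the current period it
 * was generated.
 */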

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough).
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
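
/* Worked example (hypothetical values): a peer whose max_window is 10000
 * (above TCP_MSS_DEFAULT, 536 for IPv4) gives cutoff = 5000, so a pktsize
 * of 8000 is clamped to half the window; a peer that never offered more
 * than 300 bytes keeps cutoff = 300, bounding oversized packets by the
 * whole window rather than half of it.
 */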

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = inet_csk(sk)->icsk_rto_min;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
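
/* Example: rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1300 leave 200
 * bytes of advertised window; had the peer pushed past the offer
 * (rcv_nxt = 1600), win would compute to -100 and be clamped to 0.
 */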

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to a 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
static inline u64 tcp_ns_to_ts(u64 ns)
{
	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
}
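
/* Arithmetic check: NSEC_PER_SEC / TCP_TS_HZ = 1000000, so tcp_ns_to_ts()
 * converts nanoseconds to milliseconds; e.g. 5000000000 ns becomes
 * TSval 5000.
 */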

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return tcp_ns_to_ts(tcp_clock_ns());
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return tcp_ns_to_ts(skb->skb_mstamp_ns);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 * 	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
		struct {
			__u32 flags;
			struct sock *sk_redir;
			void *data_end;
		} bpf;
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
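
/* Illustrative use of the accessor (hypothetical snippet, not from this
 * header):
 *
 *	u32 seqlen = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
 *
 * yields the sequence space the skb consumes, i.e. its payload length
 * plus one for SYN and one for FIN if those flags are set.
 */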

static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
}

static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
}

static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->bpf.sk_redir;
}

static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
}

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32 snd_interval_us;	/* snd interval for delivered packets */
	u32 rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* override sysctl_tcp_min_tso_segs */
	u32 (*min_tso_segs)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char 		name[TCP_CA_NAME_MAX];
	struct module 	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
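
/* Illustrative sketch (not from this header) of how a minimal congestion
 * control module is assembled from the Reno helpers above; the
 * "tcp_example" name is hypothetical and module_exit()/error handling are
 * omitted:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_init);
 */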

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  bool is_sack_reneg, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
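
/* Worked example: packets_out = 10, sacked_out = 2, lost_out = 1 and
 * retrans_out = 1 give tcp_left_out() = 3, hence 10 - 3 + 1 = 8 packets
 * estimated to still be in the network.
 */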

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
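
/* Example: outside cwnd reduction, snd_cwnd = 16 and snd_ssthresh = 8
 * yield max(8, 8 + 4) = 12, i.e. ssthresh may be raised to 3/4 of the
 * current cwnd.
 */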

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example:
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending more filler packets or data to artificially blow up the
 * cwnd usage, and allow application-limited processes to probe bw more
 * aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies the next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					const unsigned long max_when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  max_when);
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
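
/* Example: with icsk_rto below TCP_RTO_MIN, the base is 200ms (HZ/5), so
 * successive zero-window probes fire after roughly 200ms, 400ms, 800ms,
 * ... as icsk_backoff grows, never later than max_when.
 */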

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);

	return tcp_adv_win_scale <= 0 ?
		(space>>(-tcp_adv_win_scale)) :
		space - (space>>tcp_adv_win_scale);
}
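
/* Worked example: with tcp_adv_win_scale = 1 (the usual default), half of
 * the space is advertised (space - space/2); with 2 it is 3/4
 * (space - space/4); a negative value such as -2 would advertise only
 * space/4.
 */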
1407 
1408 /* Note: caller must be prepared to deal with negative returns */
tcp_space(const struct sock * sk)1409 static inline int tcp_space(const struct sock *sk)
1410 {
1411 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1412 				  READ_ONCE(sk->sk_backlog.len) -
1413 				  atomic_read(&sk->sk_rmem_alloc));
1414 }
1415 
tcp_full_space(const struct sock * sk)1416 static inline int tcp_full_space(const struct sock *sk)
1417 {
1418 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1419 }
1420 
1421 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1422 
1423 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1424  * If 87.5 % (7/8) of the space has been consumed, we want to override
1425  * SO_RCVLOWAT constraint, since we are receiving skbs with too small
1426  * len/truesize ratio.
1427  */
tcp_rmem_pressure(const struct sock * sk)1428 static inline bool tcp_rmem_pressure(const struct sock *sk)
1429 {
1430 	int rcvbuf, threshold;
1431 
1432 	if (tcp_under_memory_pressure(sk))
1433 		return true;
1434 
1435 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1436 	threshold = rcvbuf - (rcvbuf >> 3);
1437 
1438 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1439 }
1440 
1441 extern void tcp_openreq_init_rwin(struct request_sock *req,
1442 				  const struct sock *sk_listener,
1443 				  const struct dst_entry *dst);
1444 
1445 void tcp_enter_memory_pressure(struct sock *sk);
1446 void tcp_leave_memory_pressure(struct sock *sk);
1447 
keepalive_intvl_when(const struct tcp_sock * tp)1448 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1449 {
1450 	struct net *net = sock_net((struct sock *)tp);
1451 	int val;
1452 
1453 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1454 	 * and do_tcp_setsockopt().
1455 	 */
1456 	val = READ_ONCE(tp->keepalive_intvl);
1457 
1458 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1459 }
1460 
keepalive_time_when(const struct tcp_sock * tp)1461 static inline int keepalive_time_when(const struct tcp_sock *tp)
1462 {
1463 	struct net *net = sock_net((struct sock *)tp);
1464 	int val;
1465 
1466 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1467 	val = READ_ONCE(tp->keepalive_time);
1468 
1469 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1470 }
1471 
1472 static inline int keepalive_probes(const struct tcp_sock *tp)
1473 {
1474 	struct net *net = sock_net((struct sock *)tp);
1475 	int val;
1476 
1477 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1478 	 * and do_tcp_setsockopt().
1479 	 */
1480 	val = READ_ONCE(tp->keepalive_probes);
1481 
1482 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1483 }
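
/* All three keepalive helpers above follow the same pattern: a non-zero
 * per-socket value set via setsockopt() overrides the per-netns sysctl.
 * The GNU "x ? : y" form is shorthand for the hypothetical helper below
 * (editor's example, not compiled into the kernel):
 */
#if 0
static inline int keepalive_value_or_default(int sock_val, int sysctl_val)
{
	/* sock_val == 0 means "not configured on this socket" */
	return sock_val ? sock_val : sysctl_val;
}
#endif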
1484 
1485 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1486 {
1487 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1488 
1489 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1490 			  tcp_jiffies32 - tp->rcv_tstamp);
1491 }
1492 
1493 static inline int tcp_fin_time(const struct sock *sk)
1494 {
1495 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1496 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1497 	const int rto = inet_csk(sk)->icsk_rto;
1498 
1499 	if (fin_timeout < (rto << 2) - (rto >> 1))
1500 		fin_timeout = (rto << 2) - (rto >> 1);
1501 
1502 	return fin_timeout;
1503 }
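
/* The floor above works out to 3.5 * RTO, since (rto << 2) - (rto >> 1)
 * == 4*rto - rto/2.  Illustrative numbers: with an RTO of 200ms worth of
 * jiffies, neither linger2 nor sysctl_tcp_fin_timeout can push the
 * FIN_WAIT_2 timeout below 700ms, leaving room for a retransmitted FIN.
 */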
1504 
1505 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1506 				  int paws_win)
1507 {
1508 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1509 		return true;
1510 	if (unlikely(!time_before32(ktime_get_seconds(),
1511 				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1512 		return true;
1513 	/*
1514 	 * Some OSes send SYN and SYNACK messages with tsval=0 and tsecr=0,
1515 	 * while subsequent TCP messages carry valid values. Ignore a 0 value,
1516 	 * or else a 'negative' tsval might prevent us from accepting packets.
1517 	 */
1518 	if (!rx_opt->ts_recent)
1519 		return true;
1520 	return false;
1521 }
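
/* Worked example with made-up timestamps: ts_recent == 1000 and an
 * incoming rcv_tsval == 997 give (s32)(1000 - 997) == 3, which is
 * greater than a paws_win of 0, so the first test fails; the segment
 * is still accepted if ts_recent is older than TCP_PAWS_24DAYS or was
 * never set, since a stale or absent timestamp must not veto traffic.
 */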
1522 
1523 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1524 				   int rst)
1525 {
1526 	if (tcp_paws_check(rx_opt, 0))
1527 		return false;
1528 
1529 	/* RST segments are not recommended to carry a timestamp,
1530 	   and, if they do, it is recommended to ignore PAWS because
1531 	   "their cleanup function should take precedence over timestamps."
1532 	   Certainly, this is a mistake. We need to understand the reasons
1533 	   for this constraint before relaxing it: if the peer reboots, its
1534 	   clock may go out-of-sync and half-open connections will not be
1535 	   reset. Actually, the problem would not exist if all
1536 	   implementations followed the draft about maintaining clocks
1537 	   across reboots. Linux-2.2 DOES NOT!
1538 
1539 	   However, we can relax time bounds for RST segments to MSL.
1540 	 */
1541 	if (rst && !time_before32(ktime_get_seconds(),
1542 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1543 		return false;
1544 	return true;
1545 }
1546 
1547 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1548 			  int mib_idx, u32 *last_oow_ack_time);
1549 
1550 static inline void tcp_mib_init(struct net *net)
1551 {
1552 	/* See RFC 2012 */
1553 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1554 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1555 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1556 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1557 }
1558 
1559 /* from STCP */
1560 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1561 {
1562 	tp->lost_skb_hint = NULL;
1563 }
1564 
1565 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1566 {
1567 	tcp_clear_retrans_hints_partial(tp);
1568 	tp->retransmit_skb_hint = NULL;
1569 }
1570 
1571 union tcp_md5_addr {
1572 	struct in_addr  a4;
1573 #if IS_ENABLED(CONFIG_IPV6)
1574 	struct in6_addr	a6;
1575 #endif
1576 };
1577 
1578 /* - key database */
1579 struct tcp_md5sig_key {
1580 	struct hlist_node	node;
1581 	u8			keylen;
1582 	u8			family; /* AF_INET or AF_INET6 */
1583 	u8			prefixlen;
1584 	union tcp_md5_addr	addr;
1585 	int			l3index; /* set if key added with L3 scope */
1586 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1587 	struct rcu_head		rcu;
1588 };
1589 
1590 /* - sock block */
1591 struct tcp_md5sig_info {
1592 	struct hlist_head	head;
1593 	struct rcu_head		rcu;
1594 };
1595 
1596 /* - pseudo header */
1597 struct tcp4_pseudohdr {
1598 	__be32		saddr;
1599 	__be32		daddr;
1600 	__u8		pad;
1601 	__u8		protocol;
1602 	__be16		len;
1603 };
1604 
1605 struct tcp6_pseudohdr {
1606 	struct in6_addr	saddr;
1607 	struct in6_addr daddr;
1608 	__be32		len;
1609 	__be32		protocol;	/* including padding */
1610 };
1611 
1612 union tcp_md5sum_block {
1613 	struct tcp4_pseudohdr ip4;
1614 #if IS_ENABLED(CONFIG_IPV6)
1615 	struct tcp6_pseudohdr ip6;
1616 #endif
1617 };
1618 
1619 /* - pool: digest algorithm, hash description and scratch buffer */
1620 struct tcp_md5sig_pool {
1621 	struct ahash_request	*md5_req;
1622 	void			*scratch;
1623 };
1624 
1625 /* - functions */
1626 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1627 			const struct sock *sk, const struct sk_buff *skb);
1628 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1629 		   int family, u8 prefixlen, int l3index,
1630 		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
1631 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1632 		   int family, u8 prefixlen, int l3index);
1633 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1634 					 const struct sock *addr_sk);
1635 
1636 #ifdef CONFIG_TCP_MD5SIG
1637 #include <linux/jump_label.h>
1638 extern struct static_key_false tcp_md5_needed;
1639 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1640 					   const union tcp_md5_addr *addr,
1641 					   int family);
1642 static inline struct tcp_md5sig_key *
1643 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1644 		  const union tcp_md5_addr *addr, int family)
1645 {
1646 	if (!static_branch_unlikely(&tcp_md5_needed))
1647 		return NULL;
1648 	return __tcp_md5_do_lookup(sk, l3index, addr, family);
1649 }
1650 
1651 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1652 #else
1653 static inline struct tcp_md5sig_key *
1654 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1655 		  const union tcp_md5_addr *addr, int family)
1656 {
1657 	return NULL;
1658 }
1659 #define tcp_twsk_md5_key(twsk)	NULL
1660 #endif
1661 
1662 bool tcp_alloc_md5sig_pool(void);
1663 
1664 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1665 static inline void tcp_put_md5sig_pool(void)
1666 {
1667 	local_bh_enable();
1668 }
1669 
1670 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1671 			  unsigned int header_len);
1672 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1673 		     const struct tcp_md5sig_key *key);
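
/* Hedged usage sketch (editor's example): tcp_get_md5sig_pool() disables
 * bottom halves and returns a per-CPU pool, so each successful get must
 * be paired with tcp_put_md5sig_pool() before leaving the section.
 */
#if 0
static void example_md5_hash_key(const struct tcp_md5sig_key *key)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return;			/* pool was never allocated */
	tcp_md5_hash_key(hp, key);
	tcp_put_md5sig_pool();		/* re-enables bottom halves */
}
#endif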
1674 
1675 /* From tcp_fastopen.c */
1676 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1677 			    struct tcp_fastopen_cookie *cookie);
1678 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1679 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1680 			    u16 try_exp);
1681 struct tcp_fastopen_request {
1682 	/* Fast Open cookie. Size 0 means a cookie request */
1683 	struct tcp_fastopen_cookie	cookie;
1684 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1685 	size_t				size;
1686 	int				copied;	/* queued in tcp_connect() */
1687 	struct ubuf_info		*uarg;
1688 };
1689 void tcp_free_fastopen_req(struct tcp_sock *tp);
1690 void tcp_fastopen_destroy_cipher(struct sock *sk);
1691 void tcp_fastopen_ctx_destroy(struct net *net);
1692 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1693 			      void *primary_key, void *backup_key);
1694 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1695 			    u64 *key);
1696 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1697 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1698 			      struct request_sock *req,
1699 			      struct tcp_fastopen_cookie *foc,
1700 			      const struct dst_entry *dst);
1701 void tcp_fastopen_init_key_once(struct net *net);
1702 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1703 			     struct tcp_fastopen_cookie *cookie);
1704 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1705 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1706 #define TCP_FASTOPEN_KEY_MAX 2
1707 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
1708 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1709 
1710 /* Fastopen key context */
1711 struct tcp_fastopen_context {
1712 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1713 	int		num;
1714 	struct rcu_head	rcu;
1715 };
1716 
1717 extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1718 void tcp_fastopen_active_disable(struct sock *sk);
1719 bool tcp_fastopen_active_should_disable(struct sock *sk);
1720 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1721 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1722 
1723 /* Caller needs to wrap with rcu_read_(un)lock() */
1724 static inline
1725 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1726 {
1727 	struct tcp_fastopen_context *ctx;
1728 
1729 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1730 	if (!ctx)
1731 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1732 	return ctx;
1733 }
1734 
1735 static inline
1736 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1737 			       const struct tcp_fastopen_cookie *orig)
1738 {
1739 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1740 	    orig->len == foc->len &&
1741 	    !memcmp(orig->val, foc->val, foc->len))
1742 		return true;
1743 	return false;
1744 }
1745 
1746 static inline
1747 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1748 {
1749 	return ctx->num;
1750 }
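
/* Hedged usage sketch (editor's example): the RCU read lock required by
 * tcp_fastopen_get_ctx() is supplied by the caller, per the comment above.
 */
#if 0
static int example_fastopen_key_count(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;
	int num = 0;

	rcu_read_lock();
	ctx = tcp_fastopen_get_ctx(sk);
	if (ctx)
		num = tcp_fastopen_context_len(ctx);
	rcu_read_unlock();

	return num;
}
#endif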
1751 
1752 /* Latencies incurred by various limits for a sender. They are
1753  * chronograph-like stats that are mutually exclusive.
1754  */
1755 enum tcp_chrono {
1756 	TCP_CHRONO_UNSPEC,
1757 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1758 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1759 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1760 	__TCP_CHRONO_MAX,
1761 };
1762 
1763 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1764 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1765 
1766 /* This helper is needed because skb->tcp_tsorted_anchor uses
1767  * the same memory storage as skb->destructor/_skb_refdst.
1768  */
1769 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1770 {
1771 	skb->destructor = NULL;
1772 	skb->_skb_refdst = 0UL;
1773 }
1774 
1775 #define tcp_skb_tsorted_save(skb) {		\
1776 	unsigned long _save = skb->_skb_refdst;	\
1777 	skb->_skb_refdst = 0UL;
1778 
1779 #define tcp_skb_tsorted_restore(skb)		\
1780 	skb->_skb_refdst = _save;		\
1781 }
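
/* The two macros above deliberately leave a brace open and close it again
 * so that _save lives in its own scope.  A usage sketch modeled on
 * tcp_output.c (editor's example; nskb/oskb are illustrative names):
 */
#if 0
	tcp_skb_tsorted_save(oskb) {
		nskb = skb_clone(oskb, GFP_ATOMIC);
	} tcp_skb_tsorted_restore(oskb);
#endif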
1782 
1783 void tcp_write_queue_purge(struct sock *sk);
1784 
1785 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1786 {
1787 	return skb_rb_first(&sk->tcp_rtx_queue);
1788 }
1789 
1790 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1791 {
1792 	return skb_rb_last(&sk->tcp_rtx_queue);
1793 }
1794 
1795 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1796 {
1797 	return skb_peek(&sk->sk_write_queue);
1798 }
1799 
1800 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1801 {
1802 	return skb_peek_tail(&sk->sk_write_queue);
1803 }
1804 
1805 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1806 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1807 
1808 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1809 {
1810 	return skb_peek(&sk->sk_write_queue);
1811 }
1812 
1813 static inline bool tcp_skb_is_last(const struct sock *sk,
1814 				   const struct sk_buff *skb)
1815 {
1816 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1817 }
1818 
1819 /**
1820  * tcp_write_queue_empty - test if no payload (or FIN) is pending in the write queue
1821  * @sk: socket
1822  *
1823  * Since the write queue can have a temporary empty skb in it,
1824  * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
1825  */
1826 static inline bool tcp_write_queue_empty(const struct sock *sk)
1827 {
1828 	const struct tcp_sock *tp = tcp_sk(sk);
1829 
1830 	return tp->write_seq == tp->snd_nxt;
1831 }
1832 
1833 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1834 {
1835 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1836 }
1837 
1838 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1839 {
1840 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1841 }
1842 
1843 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1844 {
1845 	__skb_queue_tail(&sk->sk_write_queue, skb);
1846 
1847 	/* Queue it, remembering where we must start sending. */
1848 	if (sk->sk_write_queue.next == skb)
1849 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1850 }
1851 
1852 /* Insert new before skb on the write queue of sk.  */
1853 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1854 						  struct sk_buff *skb,
1855 						  struct sock *sk)
1856 {
1857 	__skb_queue_before(&sk->sk_write_queue, skb, new);
1858 }
1859 
1860 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1861 {
1862 	tcp_skb_tsorted_anchor_cleanup(skb);
1863 	__skb_unlink(skb, &sk->sk_write_queue);
1864 }
1865 
1866 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1867 
1868 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1869 {
1870 	tcp_skb_tsorted_anchor_cleanup(skb);
1871 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1872 }
1873 
1874 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1875 {
1876 	list_del(&skb->tcp_tsorted_anchor);
1877 	tcp_rtx_queue_unlink(skb, sk);
1878 	sk_wmem_free_skb(sk, skb);
1879 }
1880 
1881 static inline void tcp_push_pending_frames(struct sock *sk)
1882 {
1883 	if (tcp_send_head(sk)) {
1884 		struct tcp_sock *tp = tcp_sk(sk);
1885 
1886 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1887 	}
1888 }
1889 
1890 /* Start sequence of the skb just after the highest skb with the SACKed
1891  * bit set; valid only if sacked_out > 0 or when the caller has itself
1892  * ensured validity.
1893  */
1894 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1895 {
1896 	if (!tp->sacked_out)
1897 		return tp->snd_una;
1898 
1899 	if (tp->highest_sack == NULL)
1900 		return tp->snd_nxt;
1901 
1902 	return TCP_SKB_CB(tp->highest_sack)->seq;
1903 }
1904 
1905 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1906 {
1907 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1908 }
1909 
1910 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1911 {
1912 	return tcp_sk(sk)->highest_sack;
1913 }
1914 
1915 static inline void tcp_highest_sack_reset(struct sock *sk)
1916 {
1917 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1918 }
1919 
1920 /* Called when old skb is about to be deleted and replaced by new skb */
1921 static inline void tcp_highest_sack_replace(struct sock *sk,
1922 					    struct sk_buff *old,
1923 					    struct sk_buff *new)
1924 {
1925 	if (old == tcp_highest_sack(sk))
1926 		tcp_sk(sk)->highest_sack = new;
1927 }
1928 
1929 /* This helper checks if socket has IP_TRANSPARENT set */
1930 static inline bool inet_sk_transparent(const struct sock *sk)
1931 {
1932 	switch (sk->sk_state) {
1933 	case TCP_TIME_WAIT:
1934 		return inet_twsk(sk)->tw_transparent;
1935 	case TCP_NEW_SYN_RECV:
1936 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1937 	}
1938 	return inet_sk(sk)->transparent;
1939 }
1940 
1941 /* Determines whether this is a thin stream (which may suffer from
1942  * increased latency). Used to trigger latency-reducing mechanisms.
1943  */
1944 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1945 {
1946 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1947 }
1948 
1949 /* /proc */
1950 enum tcp_seq_states {
1951 	TCP_SEQ_STATE_LISTENING,
1952 	TCP_SEQ_STATE_ESTABLISHED,
1953 };
1954 
1955 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
1956 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1957 void tcp_seq_stop(struct seq_file *seq, void *v);
1958 
1959 struct tcp_seq_afinfo {
1960 	sa_family_t			family;
1961 };
1962 
1963 struct tcp_iter_state {
1964 	struct seq_net_private	p;
1965 	enum tcp_seq_states	state;
1966 	struct sock		*syn_wait_sk;
1967 	struct tcp_seq_afinfo	*bpf_seq_afinfo;
1968 	int			bucket, offset, sbucket, num;
1969 	loff_t			last_pos;
1970 };
1971 
1972 extern struct request_sock_ops tcp_request_sock_ops;
1973 extern struct request_sock_ops tcp6_request_sock_ops;
1974 
1975 void tcp_v4_destroy_sock(struct sock *sk);
1976 
1977 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1978 				netdev_features_t features);
1979 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
1980 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
1981 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
1982 INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
1983 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
1984 int tcp_gro_complete(struct sk_buff *skb);
1985 
1986 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1987 
1988 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1989 {
1990 	struct net *net = sock_net((struct sock *)tp);
1991 	u32 val;
1992 
1993 	val = READ_ONCE(tp->notsent_lowat);
1994 
1995 	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
1996 }
1997 
1998 /* @wake is one when sk_stream_write_space() calls us.
1999  * This sends EPOLLOUT only once notsent_bytes drops below half the limit.
2000  * This mimics the strategy used in sock_def_write_space().
2001  */
2002 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
2003 {
2004 	const struct tcp_sock *tp = tcp_sk(sk);
2005 	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
2006 			    READ_ONCE(tp->snd_nxt);
2007 
2008 	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
2009 }
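
/* Illustrative arithmetic: with tcp_notsent_lowat(tp) == 131072 and
 * wake == 1, (notsent_bytes << 1) < 131072 only holds while fewer than
 * 65536 not-yet-sent bytes are queued, so EPOLLOUT is raised once the
 * backlog drains below half the limit, as described above.
 */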
2010 
2011 #ifdef CONFIG_PROC_FS
2012 int tcp4_proc_init(void);
2013 void tcp4_proc_exit(void);
2014 #endif
2015 
2016 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2017 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2018 		     const struct tcp_request_sock_ops *af_ops,
2019 		     struct sock *sk, struct sk_buff *skb);
2020 
2021 /* TCP af-specific functions */
2022 struct tcp_sock_af_ops {
2023 #ifdef CONFIG_TCP_MD5SIG
2024 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2025 						const struct sock *addr_sk);
2026 	int		(*calc_md5_hash)(char *location,
2027 					 const struct tcp_md5sig_key *md5,
2028 					 const struct sock *sk,
2029 					 const struct sk_buff *skb);
2030 	int		(*md5_parse)(struct sock *sk,
2031 				     int optname,
2032 				     sockptr_t optval,
2033 				     int optlen);
2034 #endif
2035 };
2036 
2037 struct tcp_request_sock_ops {
2038 	u16 mss_clamp;
2039 #ifdef CONFIG_TCP_MD5SIG
2040 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2041 						 const struct sock *addr_sk);
2042 	int		(*calc_md5_hash) (char *location,
2043 					  const struct tcp_md5sig_key *md5,
2044 					  const struct sock *sk,
2045 					  const struct sk_buff *skb);
2046 #endif
2047 	void (*init_req)(struct request_sock *req,
2048 			 const struct sock *sk_listener,
2049 			 struct sk_buff *skb);
2050 #ifdef CONFIG_SYN_COOKIES
2051 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2052 				 __u16 *mss);
2053 #endif
2054 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
2055 				       const struct request_sock *req);
2056 	u32 (*init_seq)(const struct sk_buff *skb);
2057 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2058 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2059 			   struct flowi *fl, struct request_sock *req,
2060 			   struct tcp_fastopen_cookie *foc,
2061 			   enum tcp_synack_type synack_type,
2062 			   struct sk_buff *syn_skb);
2063 };
2064 
2065 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2066 #if IS_ENABLED(CONFIG_IPV6)
2067 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2068 #endif
2069 
2070 #ifdef CONFIG_SYN_COOKIES
2071 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2072 					 const struct sock *sk, struct sk_buff *skb,
2073 					 __u16 *mss)
2074 {
2075 	tcp_synq_overflow(sk);
2076 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2077 	return ops->cookie_init_seq(skb, mss);
2078 }
2079 #else
2080 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2081 					 const struct sock *sk, struct sk_buff *skb,
2082 					 __u16 *mss)
2083 {
2084 	return 0;
2085 }
2086 #endif
2087 
2088 int tcpv4_offload_init(void);
2089 
2090 void tcp_v4_init(void);
2091 void tcp_init(void);
2092 
2093 /* tcp_recovery.c */
2094 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2095 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2096 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2097 				u32 reo_wnd);
2098 extern bool tcp_rack_mark_lost(struct sock *sk);
2099 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2100 			     u64 xmit_time);
2101 extern void tcp_rack_reo_timeout(struct sock *sk);
2102 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2103 
2104 /* At how many usecs into the future should the RTO fire? */
2105 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2106 {
2107 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2108 	u32 rto = inet_csk(sk)->icsk_rto;
2109 	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2110 
2111 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2112 }
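
/* Illustrative arithmetic: if the head skb was (re)sent 150000 us before
 * tcp_mstamp and icsk_rto converts to 200000 us, the result is 50000 us,
 * i.e. the timer should fire 50ms from now; a negative value means the
 * RTO deadline has already passed.
 */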
2113 
2114 /*
2115  * Save and compile IPv4 options, return a pointer to them
2116  */
2117 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2118 							 struct sk_buff *skb)
2119 {
2120 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2121 	struct ip_options_rcu *dopt = NULL;
2122 
2123 	if (opt->optlen) {
2124 		int opt_size = sizeof(*dopt) + opt->optlen;
2125 
2126 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2127 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2128 			kfree(dopt);
2129 			dopt = NULL;
2130 		}
2131 	}
2132 	return dopt;
2133 }
2134 
2135 /* Locally generated TCP pure ACKs have skb->truesize == 2
2136  * (check tcp_send_ack() in net/ipv4/tcp_output.c).
2137  * This is much faster than dissecting the packet to find out.
2138  * (Think of GRE encapsulations, IPv4, IPv6, ...)
2139  */
2140 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2141 {
2142 	return skb->truesize == 2;
2143 }
2144 
2145 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2146 {
2147 	skb->truesize = 2;
2148 }
2149 
2150 static inline int tcp_inq(struct sock *sk)
2151 {
2152 	struct tcp_sock *tp = tcp_sk(sk);
2153 	int answ;
2154 
2155 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2156 		answ = 0;
2157 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2158 		   !tp->urg_data ||
2159 		   before(tp->urg_seq, tp->copied_seq) ||
2160 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2161 
2162 		answ = tp->rcv_nxt - tp->copied_seq;
2163 
2164 		/* Subtract 1, if FIN was received */
2165 		if (answ && sock_flag(sk, SOCK_DONE))
2166 			answ--;
2167 	} else {
2168 		answ = tp->urg_seq - tp->copied_seq;
2169 	}
2170 
2171 	return answ;
2172 }
2173 
2174 int tcp_peek_len(struct socket *sock);
2175 
2176 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2177 {
2178 	u16 segs_in;
2179 
2180 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2181 	tp->segs_in += segs_in;
2182 	if (skb->len > tcp_hdrlen(skb))
2183 		tp->data_segs_in += segs_in;
2184 }
2185 
2186 /*
2187  * TCP listen path runs lockless.
2188  * We forced "struct sock" to be const qualified to make sure
2189  * we don't modify one of its fields by mistake.
2190  * Here, we increment sk_drops which is an atomic_t, so we can safely
2191  * make sock writable again.
2192  */
2193 static inline void tcp_listendrop(const struct sock *sk)
2194 {
2195 	atomic_inc(&((struct sock *)sk)->sk_drops);
2196 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2197 }
2198 
2199 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2200 
2201 /*
2202  * Interface for adding Upper Level Protocols over TCP
2203  */
2204 
2205 #define TCP_ULP_NAME_MAX	16
2206 #define TCP_ULP_MAX		128
2207 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2208 
2209 struct tcp_ulp_ops {
2210 	struct list_head	list;
2211 
2212 	/* initialize ulp */
2213 	int (*init)(struct sock *sk);
2214 	/* update ulp */
2215 	void (*update)(struct sock *sk, struct proto *p,
2216 		       void (*write_space)(struct sock *sk));
2217 	/* cleanup ulp */
2218 	void (*release)(struct sock *sk);
2219 	/* diagnostic */
2220 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2221 	size_t (*get_info_size)(const struct sock *sk);
2222 	/* clone ulp */
2223 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2224 		      const gfp_t priority);
2225 
2226 	char		name[TCP_ULP_NAME_MAX];
2227 	struct module	*owner;
2228 };
2229 int tcp_register_ulp(struct tcp_ulp_ops *type);
2230 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2231 int tcp_set_ulp(struct sock *sk, const char *name);
2232 void tcp_get_available_ulp(char *buf, size_t len);
2233 void tcp_cleanup_ulp(struct sock *sk);
2234 void tcp_update_ulp(struct sock *sk, struct proto *p,
2235 		    void (*write_space)(struct sock *sk));
2236 
2237 #define MODULE_ALIAS_TCP_ULP(name)				\
2238 	__MODULE_INFO(alias, alias_userspace, name);		\
2239 	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
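
/* Hedged registration sketch (editor's example, modeled on how in-tree
 * ULPs such as kTLS hook in; every "example_*" name is hypothetical):
 */
#if 0
static struct tcp_ulp_ops example_ulp_ops = {
	.name	 = "example",
	.owner	 = THIS_MODULE,
	.init	 = example_ulp_init,		/* hypothetical callback */
	.release = example_ulp_release,		/* hypothetical callback */
};

static int __init example_ulp_register(void)
{
	return tcp_register_ulp(&example_ulp_ops);
}
MODULE_ALIAS_TCP_ULP("example");
#endif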
2240 
2241 struct sk_msg;
2242 struct sk_psock;
2243 
2244 #ifdef CONFIG_BPF_STREAM_PARSER
2245 struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2246 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2247 #else
2248 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2249 {
2250 }
2251 #endif /* CONFIG_BPF_STREAM_PARSER */
2252 
2253 #ifdef CONFIG_NET_SOCK_MSG
2254 int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
2255 			  int flags);
2256 int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
2257 		      struct msghdr *msg, int len, int flags);
2258 #endif /* CONFIG_NET_SOCK_MSG */
2259 
2260 #ifdef CONFIG_CGROUP_BPF
2261 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2262 				      struct sk_buff *skb,
2263 				      unsigned int end_offset)
2264 {
2265 	skops->skb = skb;
2266 	skops->skb_data_end = skb->data + end_offset;
2267 }
2268 #else
2269 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2270 				      struct sk_buff *skb,
2271 				      unsigned int end_offset)
2272 {
2273 }
2274 #endif
2275 
2276 /* Call BPF_SOCK_OPS program that returns an int. If the return value
2277  * is < 0, then the BPF op failed (for example if the loaded BPF
2278  * program does not support the chosen operation or there is no BPF
2279  * program loaded).
2280  */
2281 #ifdef CONFIG_BPF
2282 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2283 {
2284 	struct bpf_sock_ops_kern sock_ops;
2285 	int ret;
2286 
2287 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2288 	if (sk_fullsock(sk)) {
2289 		sock_ops.is_fullsock = 1;
2290 		sock_owned_by_me(sk);
2291 	}
2292 
2293 	sock_ops.sk = sk;
2294 	sock_ops.op = op;
2295 	if (nargs > 0)
2296 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2297 
2298 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2299 	if (ret == 0)
2300 		ret = sock_ops.reply;
2301 	else
2302 		ret = -1;
2303 	return ret;
2304 }
2305 
2306 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2307 {
2308 	u32 args[2] = {arg1, arg2};
2309 
2310 	return tcp_call_bpf(sk, op, 2, args);
2311 }
2312 
2313 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2314 				    u32 arg3)
2315 {
2316 	u32 args[3] = {arg1, arg2, arg3};
2317 
2318 	return tcp_call_bpf(sk, op, 3, args);
2319 }
2320 
2321 #else
2322 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2323 {
2324 	return -EPERM;
2325 }
2326 
2327 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2328 {
2329 	return -EPERM;
2330 }
2331 
2332 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2333 				    u32 arg3)
2334 {
2335 	return -EPERM;
2336 }
2337 
2338 #endif
2339 
2340 static inline u32 tcp_timeout_init(struct sock *sk)
2341 {
2342 	int timeout;
2343 
2344 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2345 
2346 	if (timeout <= 0)
2347 		timeout = TCP_TIMEOUT_INIT;
2348 	return timeout;
2349 }
2350 
2351 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2352 {
2353 	int rwnd;
2354 
2355 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2356 
2357 	if (rwnd < 0)
2358 		rwnd = 0;
2359 	return rwnd;
2360 }
2361 
2362 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2363 {
2364 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2365 }
2366 
2367 static inline void tcp_bpf_rtt(struct sock *sk)
2368 {
2369 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2370 		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2371 }
2372 
2373 #if IS_ENABLED(CONFIG_SMC)
2374 extern struct static_key_false tcp_have_smc;
2375 #endif
2376 
2377 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2378 void clean_acked_data_enable(struct inet_connection_sock *icsk,
2379 			     void (*cad)(struct sock *sk, u32 ack_seq));
2380 void clean_acked_data_disable(struct inet_connection_sock *icsk);
2381 void clean_acked_data_flush(void);
2382 #endif
2383 
2384 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2385 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2386 				    const struct tcp_sock *tp)
2387 {
2388 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2389 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2390 }
2391 
2392 /* Compute Earliest Departure Time for some control packets
2393  * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
2394  */
2395 static inline u64 tcp_transmit_time(const struct sock *sk)
2396 {
2397 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2398 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2399 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2400 
2401 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2402 	}
2403 	return 0;
2404 }
2405 
2406 #endif	/* _TCP_H */
2407