1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the TCP module.
8  *
9  * Version:	@(#)tcp.h	1.0.5	05/23/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  */
14 #ifndef _TCP_H
15 #define _TCP_H
16 
17 #define FASTRETRANS_DEBUG 1
18 
19 #include <linux/list.h>
20 #include <linux/tcp.h>
21 #include <linux/bug.h>
22 #include <linux/slab.h>
23 #include <linux/cache.h>
24 #include <linux/percpu.h>
25 #include <linux/skbuff.h>
26 #include <linux/kref.h>
27 #include <linux/ktime.h>
28 #include <linux/indirect_call_wrapper.h>
29 
30 #include <net/inet_connection_sock.h>
31 #include <net/inet_timewait_sock.h>
32 #include <net/inet_hashtables.h>
33 #include <net/checksum.h>
34 #include <net/request_sock.h>
35 #include <net/sock_reuseport.h>
36 #include <net/sock.h>
37 #include <net/snmp.h>
38 #include <net/ip.h>
39 #include <net/tcp_states.h>
40 #include <net/inet_ecn.h>
41 #include <net/dst.h>
42 #include <net/mptcp.h>
43 
44 #include <linux/seq_file.h>
45 #include <linux/memcontrol.h>
46 #include <linux/bpf-cgroup.h>
47 #include <linux/siphash.h>
48 
49 extern struct inet_hashinfo tcp_hashinfo;
50 
51 extern struct percpu_counter tcp_orphan_count;
52 void tcp_time_wait(struct sock *sk, int state, int timeo);
53 
54 #define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
55 #define MAX_TCP_OPTION_SPACE 40
56 #define TCP_MIN_SND_MSS		48
57 #define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
58 
59 /*
60  * Never offer a window over 32767 without using window scaling. Some
61  * poor stacks do signed 16bit maths!
62  */
63 #define MAX_TCP_WINDOW		32767U
64 
65 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
66 #define TCP_MIN_MSS		88U
67 
68 /* The initial MTU to use for probing */
69 #define TCP_BASE_MSS		1024
70 
71 /* probing interval, default to 10 minutes as per RFC4821 */
72 #define TCP_PROBE_INTERVAL	600
73 
74 /* Specify interval when tcp mtu probing will stop */
75 #define TCP_PROBE_THRESHOLD	8
76 
77 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
78 #define TCP_FASTRETRANS_THRESH 3
79 
80 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
81 #define TCP_MAX_QUICKACKS	16U
82 
83 /* Maximal number of window scale according to RFC1323 */
84 #define TCP_MAX_WSCALE		14U
85 
86 /* urg_data states */
87 #define TCP_URG_VALID	0x0100
88 #define TCP_URG_NOTYET	0x0200
89 #define TCP_URG_READ	0x0400
90 
91 #define TCP_RETR1	3	/*
92 				 * This is how many retries it does before it
93 				 * tries to figure out if the gateway is
94 				 * down. Minimal RFC value is 3; it corresponds
95 				 * to ~3sec-8min depending on RTO.
96 				 */
97 
98 #define TCP_RETR2	15	/*
99 				 * This should take at least
100 				 * 90 minutes to time out.
101 				 * RFC1122 says that the limit is 100 sec.
102 				 * 15 is ~13-30min depending on RTO.
103 				 */
104 
105 #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
106 				 * when active opening a connection.
107 				 * RFC1122 says the minimum retry MUST
108 				 * be at least 180secs.  Nevertheless
109 				 * this value corresponds to
110 				 * 63secs of retransmission with the
111 				 * current initial RTO.
112 				 */
113 
114 #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
115 				 * when passive opening a connection.
116 				 * This corresponds to 31secs of
117 				 * retransmission with the current
118 				 * initial RTO.
119 				 */
120 
121 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
122 				  * state, about 60 seconds	*/
123 #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
124                                  /* BSD style FIN_WAIT2 deadlock breaker.
125 				  * It used to be 3min, new value is 60sec,
126 				  * to combine FIN-WAIT-2 timeout with
127 				  * TIME-WAIT timer.
128 				  */
129 #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
130 
131 #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
132 #if HZ >= 100
133 #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
134 #define TCP_ATO_MIN	((unsigned)(HZ/25))
135 #else
136 #define TCP_DELACK_MIN	4U
137 #define TCP_ATO_MIN	4U
138 #endif
139 #define TCP_RTO_MAX	((unsigned)(120*HZ))
140 #define TCP_RTO_MIN	((unsigned)(HZ/5))
141 #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
142 
143 #define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
144 
145 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
146 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
147 						 * used as a fallback RTO for the
148 						 * initial data transmission if no
149 						 * valid RTT sample has been acquired,
150 						 * most likely due to retrans in 3WHS.
151 						 */
152 
153 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
154 					                 * for local resources.
155 					                 */
156 #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
157 #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
158 #define TCP_KEEPALIVE_INTVL	(75*HZ)
159 
160 #define MAX_TCP_KEEPIDLE	32767
161 #define MAX_TCP_KEEPINTVL	32767
162 #define MAX_TCP_KEEPCNT		127
163 #define MAX_TCP_SYNCNT		127
164 
165 #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
166 
167 #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
168 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
169 					 * after this time. It should be equal
170 					 * (or greater than) TCP_TIMEWAIT_LEN
171 					 * to provide reliability equal to one
172 					 * provided by timewait state.
173 					 */
174 #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
175 					 * timestamps. It must be less than
176 					 * minimal timewait lifetime.
177 					 */
178 /*
179  *	TCP option
180  */
181 
182 #define TCPOPT_NOP		1	/* Padding */
183 #define TCPOPT_EOL		0	/* End of options */
184 #define TCPOPT_MSS		2	/* Segment size negotiating */
185 #define TCPOPT_WINDOW		3	/* Window scaling */
186 #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
187 #define TCPOPT_SACK             5       /* SACK Block */
188 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
189 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
190 #define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
191 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
192 #define TCPOPT_EXP		254	/* Experimental */
193 /* Magic number to be after the option value for sharing TCP
194  * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
195  */
196 #define TCPOPT_FASTOPEN_MAGIC	0xF989
197 #define TCPOPT_SMC_MAGIC	0xE2D4C3D9
198 
199 /*
200  *     TCP option lengths
201  */
202 
203 #define TCPOLEN_MSS            4
204 #define TCPOLEN_WINDOW         3
205 #define TCPOLEN_SACK_PERM      2
206 #define TCPOLEN_TIMESTAMP      10
207 #define TCPOLEN_MD5SIG         18
208 #define TCPOLEN_FASTOPEN_BASE  2
209 #define TCPOLEN_EXP_FASTOPEN_BASE  4
210 #define TCPOLEN_EXP_SMC_BASE   6
211 
212 /* But this is what stacks really send out. */
213 #define TCPOLEN_TSTAMP_ALIGNED		12
214 #define TCPOLEN_WSCALE_ALIGNED		4
215 #define TCPOLEN_SACKPERM_ALIGNED	4
216 #define TCPOLEN_SACK_BASE		2
217 #define TCPOLEN_SACK_BASE_ALIGNED	4
218 #define TCPOLEN_SACK_PERBLOCK		8
219 #define TCPOLEN_MD5SIG_ALIGNED		20
220 #define TCPOLEN_MSS_ALIGNED		4
221 #define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
222 
223 /* Flags in tp->nonagle */
224 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
225 #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
226 #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
227 
228 /* TCP thin-stream limits */
229 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
230 
231 /* TCP initial congestion window as per rfc6928 */
232 #define TCP_INIT_CWND		10
233 
234 /* Bit Flags for sysctl_tcp_fastopen */
235 #define	TFO_CLIENT_ENABLE	1
236 #define	TFO_SERVER_ENABLE	2
237 #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
238 
239 /* Accept SYN data w/o any cookie option */
240 #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
241 
242 /* Force enable TFO on all listeners, i.e., not requiring the
243  * TCP_FASTOPEN socket option.
244  */
245 #define	TFO_SERVER_WO_SOCKOPT1	0x400
246 
247 
248 /* sysctl variables for tcp */
249 extern int sysctl_tcp_max_orphans;
250 extern long sysctl_tcp_mem[3];
251 
252 #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
253 #define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
254 #define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
255 
256 extern atomic_long_t tcp_memory_allocated;
257 extern struct percpu_counter tcp_sockets_allocated;
258 extern unsigned long tcp_memory_pressure;
259 
260 /* optimized version of sk_under_memory_pressure() for TCP sockets */
261 static inline bool tcp_under_memory_pressure(const struct sock *sk)
262 {
263 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
264 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
265 		return true;
266 
267 	return READ_ONCE(tcp_memory_pressure);
268 }
269 /*
270  * The next routines deal with comparing 32 bit unsigned ints
271  * and worry about wraparound (automatic with unsigned arithmetic).
272  */
273 
274 static inline bool before(__u32 seq1, __u32 seq2)
275 {
276         return (__s32)(seq1-seq2) < 0;
277 }
278 #define after(seq2, seq1) 	before(seq1, seq2)
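/* Worked example of the wraparound arithmetic above: with seq1 = 0xfffffff0
 * and seq2 = 0x10, seq1 - seq2 = 0xffffffe0, i.e. -32 as __s32, so
 * before(0xfffffff0, 0x10) is true even though the sequence space wrapped.
 */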
279 
280 /* is s2<=s1<=s3 ? */
281 static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
282 {
283 	return seq3 - seq2 >= seq1 - seq2;
284 }
285 
286 static inline bool tcp_out_of_memory(struct sock *sk)
287 {
288 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
289 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
290 		return true;
291 	return false;
292 }
293 
294 void sk_forced_mem_schedule(struct sock *sk, int size);
295 
296 static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
297 {
298 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
299 	int orphans = percpu_counter_read_positive(ocp);
300 
301 	if (orphans << shift > sysctl_tcp_max_orphans) {
302 		orphans = percpu_counter_sum_positive(ocp);
303 		if (orphans << shift > sysctl_tcp_max_orphans)
304 			return true;
305 	}
306 	return false;
307 }
308 
309 bool tcp_check_oom(struct sock *sk, int shift);
310 
311 
312 extern struct proto tcp_prot;
313 
314 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
315 #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
316 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
317 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
318 
319 void tcp_tasklet_init(void);
320 
321 int tcp_v4_err(struct sk_buff *skb, u32);
322 
323 void tcp_shutdown(struct sock *sk, int how);
324 
325 int tcp_v4_early_demux(struct sk_buff *skb);
326 int tcp_v4_rcv(struct sk_buff *skb);
327 
328 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
329 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
330 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
331 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
332 		 int flags);
333 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
334 			size_t size, int flags);
335 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
336 		 size_t size, int flags);
337 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
338 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
339 	      int size_goal);
340 void tcp_release_cb(struct sock *sk);
341 void tcp_wfree(struct sk_buff *skb);
342 void tcp_write_timer_handler(struct sock *sk);
343 void tcp_delack_timer_handler(struct sock *sk);
344 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
345 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
346 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
347 void tcp_rcv_space_adjust(struct sock *sk);
348 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
349 void tcp_twsk_destructor(struct sock *sk);
350 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
351 			struct pipe_inode_info *pipe, size_t len,
352 			unsigned int flags);
353 
354 static inline void tcp_dec_quickack_mode(struct sock *sk)
355 {
356 	struct inet_connection_sock *icsk = inet_csk(sk);
357 
358 	if (icsk->icsk_ack.quick) {
359 		/* How many ACKs S/ACKing new data have we sent? */
360 		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
361 
362 		if (pkts >= icsk->icsk_ack.quick) {
363 			icsk->icsk_ack.quick = 0;
364 			/* Leaving quickack mode we deflate ATO. */
365 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
366 		} else
367 			icsk->icsk_ack.quick -= pkts;
368 	}
369 }
370 
371 #define	TCP_ECN_OK		1
372 #define	TCP_ECN_QUEUE_CWR	2
373 #define	TCP_ECN_DEMAND_CWR	4
374 #define	TCP_ECN_SEEN		8
375 
376 enum tcp_tw_status {
377 	TCP_TW_SUCCESS = 0,
378 	TCP_TW_RST = 1,
379 	TCP_TW_ACK = 2,
380 	TCP_TW_SYN = 3
381 };
382 
383 
384 enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
385 					      struct sk_buff *skb,
386 					      const struct tcphdr *th);
387 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
388 			   struct request_sock *req, bool fastopen,
389 			   bool *lost_race);
390 int tcp_child_process(struct sock *parent, struct sock *child,
391 		      struct sk_buff *skb);
392 void tcp_enter_loss(struct sock *sk);
393 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
394 void tcp_clear_retrans(struct tcp_sock *tp);
395 void tcp_update_metrics(struct sock *sk);
396 void tcp_init_metrics(struct sock *sk);
397 void tcp_metrics_init(void);
398 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
399 void __tcp_close(struct sock *sk, long timeout);
400 void tcp_close(struct sock *sk, long timeout);
401 void tcp_init_sock(struct sock *sk);
402 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
403 __poll_t tcp_poll(struct file *file, struct socket *sock,
404 		      struct poll_table_struct *wait);
405 int tcp_getsockopt(struct sock *sk, int level, int optname,
406 		   char __user *optval, int __user *optlen);
407 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
408 		   unsigned int optlen);
409 void tcp_set_keepalive(struct sock *sk, int val);
410 void tcp_syn_ack_timeout(const struct request_sock *req);
411 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
412 		int flags, int *addr_len);
413 int tcp_set_rcvlowat(struct sock *sk, int val);
414 void tcp_data_ready(struct sock *sk);
415 #ifdef CONFIG_MMU
416 int tcp_mmap(struct file *file, struct socket *sock,
417 	     struct vm_area_struct *vma);
418 #endif
419 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
420 		       struct tcp_options_received *opt_rx,
421 		       int estab, struct tcp_fastopen_cookie *foc);
422 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
423 
424 /*
425  *	BPF SKB-less helpers
426  */
427 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
428 			 struct tcphdr *th, u32 *cookie);
429 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
430 			 struct tcphdr *th, u32 *cookie);
431 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
432 			  const struct tcp_request_sock_ops *af_ops,
433 			  struct sock *sk, struct tcphdr *th);
434 /*
435  *	TCP v4 functions exported for the inet6 API
436  */
437 
438 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
439 void tcp_v4_mtu_reduced(struct sock *sk);
440 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
441 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
442 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
443 struct sock *tcp_create_openreq_child(const struct sock *sk,
444 				      struct request_sock *req,
445 				      struct sk_buff *skb);
446 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
447 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
448 				  struct request_sock *req,
449 				  struct dst_entry *dst,
450 				  struct request_sock *req_unhash,
451 				  bool *own_req);
452 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
453 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
454 int tcp_connect(struct sock *sk);
455 enum tcp_synack_type {
456 	TCP_SYNACK_NORMAL,
457 	TCP_SYNACK_FASTOPEN,
458 	TCP_SYNACK_COOKIE,
459 };
460 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
461 				struct request_sock *req,
462 				struct tcp_fastopen_cookie *foc,
463 				enum tcp_synack_type synack_type,
464 				struct sk_buff *syn_skb);
465 int tcp_disconnect(struct sock *sk, int flags);
466 
467 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
468 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
469 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
470 
471 /* From syncookies.c */
472 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
473 				 struct request_sock *req,
474 				 struct dst_entry *dst, u32 tsoff);
475 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
476 		      u32 cookie);
477 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
478 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
479 					    const struct tcp_request_sock_ops *af_ops,
480 					    struct sock *sk, struct sk_buff *skb);
481 #ifdef CONFIG_SYN_COOKIES
482 
483 /* Syncookies use a monotonic timer which increments every 60 seconds.
484  * This counter is used both as a hash input and partially encoded into
485  * the cookie value.  A cookie is only validated further if the delta
486  * between the current counter value and the encoded one is less than this,
487  * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
488  * the counter advances immediately after a cookie is generated).
489  */
490 #define MAX_SYNCOOKIE_AGE	2
491 #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
492 #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
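/* Example: TCP_SYNCOOKIE_PERIOD is 60 seconds worth of jiffies, so
 * TCP_SYNCOOKIE_VALID spans 120 seconds; a cookie minted just before the
 * 60-second counter ticks is therefore only honoured for a little over
 * 60 seconds, as noted above.
 */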
493 
494 /* syncookies: remember time of last synqueue overflow
495  * But do not dirty this field too often (once per second is enough)
496  * It is racy as we do not hold a lock, but race is very minor.
497  */
498 static inline void tcp_synq_overflow(const struct sock *sk)
499 {
500 	unsigned int last_overflow;
501 	unsigned int now = jiffies;
502 
503 	if (sk->sk_reuseport) {
504 		struct sock_reuseport *reuse;
505 
506 		reuse = rcu_dereference(sk->sk_reuseport_cb);
507 		if (likely(reuse)) {
508 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
509 			if (!time_between32(now, last_overflow,
510 					    last_overflow + HZ))
511 				WRITE_ONCE(reuse->synq_overflow_ts, now);
512 			return;
513 		}
514 	}
515 
516 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
517 	if (!time_between32(now, last_overflow, last_overflow + HZ))
518 		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
519 }
520 
521 /* syncookies: no recent synqueue overflow on this listening socket? */
522 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
523 {
524 	unsigned int last_overflow;
525 	unsigned int now = jiffies;
526 
527 	if (sk->sk_reuseport) {
528 		struct sock_reuseport *reuse;
529 
530 		reuse = rcu_dereference(sk->sk_reuseport_cb);
531 		if (likely(reuse)) {
532 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
533 			return !time_between32(now, last_overflow - HZ,
534 					       last_overflow +
535 					       TCP_SYNCOOKIE_VALID);
536 		}
537 	}
538 
539 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
540 
541 	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
542 	 * then we're under synflood. However, we have to use
543 	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
544 	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
545 	 * jiffies but before we store .ts_recent_stamp into last_overflow,
546 	 * which could lead to rejecting a valid syncookie.
547 	 */
548 	return !time_between32(now, last_overflow - HZ,
549 			       last_overflow + TCP_SYNCOOKIE_VALID);
550 }
551 
552 static inline u32 tcp_cookie_time(void)
553 {
554 	u64 val = get_jiffies_64();
555 
556 	do_div(val, TCP_SYNCOOKIE_PERIOD);
557 	return val;
558 }
559 
560 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
561 			      u16 *mssp);
562 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
563 u64 cookie_init_timestamp(struct request_sock *req, u64 now);
564 bool cookie_timestamp_decode(const struct net *net,
565 			     struct tcp_options_received *opt);
566 bool cookie_ecn_ok(const struct tcp_options_received *opt,
567 		   const struct net *net, const struct dst_entry *dst);
568 
569 /* From net/ipv6/syncookies.c */
570 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
571 		      u32 cookie);
572 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
573 
574 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
575 			      const struct tcphdr *th, u16 *mssp);
576 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
577 #endif
578 /* tcp_output.c */
579 
580 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
581 			       int nonagle);
582 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
583 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
584 void tcp_retransmit_timer(struct sock *sk);
585 void tcp_xmit_retransmit_queue(struct sock *);
586 void tcp_simple_retransmit(struct sock *);
587 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
588 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
589 enum tcp_queue {
590 	TCP_FRAG_IN_WRITE_QUEUE,
591 	TCP_FRAG_IN_RTX_QUEUE,
592 };
593 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
594 		 struct sk_buff *skb, u32 len,
595 		 unsigned int mss_now, gfp_t gfp);
596 
597 void tcp_send_probe0(struct sock *);
598 void tcp_send_partial(struct sock *);
599 int tcp_write_wakeup(struct sock *, int mib);
600 void tcp_send_fin(struct sock *sk);
601 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
602 int tcp_send_synack(struct sock *);
603 void tcp_push_one(struct sock *, unsigned int mss_now);
604 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
605 void tcp_send_ack(struct sock *sk);
606 void tcp_send_delayed_ack(struct sock *sk);
607 void tcp_send_loss_probe(struct sock *sk);
608 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
609 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
610 			     const struct sk_buff *next_skb);
611 
612 /* tcp_input.c */
613 void tcp_rearm_rto(struct sock *sk);
614 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
615 void tcp_reset(struct sock *sk);
616 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
617 void tcp_fin(struct sock *sk);
618 void tcp_check_space(struct sock *sk);
619 
620 /* tcp_timer.c */
621 void tcp_init_xmit_timers(struct sock *);
622 static inline void tcp_clear_xmit_timers(struct sock *sk)
623 {
624 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
625 		__sock_put(sk);
626 
627 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
628 		__sock_put(sk);
629 
630 	inet_csk_clear_xmit_timers(sk);
631 }
632 
633 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
634 unsigned int tcp_current_mss(struct sock *sk);
635 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
636 
637 /* Bound MSS / TSO packet size with the half of the window */
638 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
639 {
640 	int cutoff;
641 
642 	/* When peer uses tiny windows, there is no use in packetizing
643 	 * to sub-MSS pieces for the sake of SWS or making sure there
644 	 * are enough packets in the pipe for fast recovery.
645 	 *
646 	 * On the other hand, for extremely large MSS devices, handling
647 	 * smaller than MSS windows in this way does make sense.
648 	 */
649 	if (tp->max_window > TCP_MSS_DEFAULT)
650 		cutoff = (tp->max_window >> 1);
651 	else
652 		cutoff = tp->max_window;
653 
654 	if (cutoff && pktsize > cutoff)
655 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
656 	else
657 		return pktsize;
658 }
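/* Example: if the peer ever advertised max_window = 65535 (> TCP_MSS_DEFAULT),
 * cutoff is 32767, so a 48000 byte pktsize is clamped to 32767 while anything
 * at or below 32767 is returned unchanged.
 */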
659 
660 /* tcp.c */
661 void tcp_get_info(struct sock *, struct tcp_info *);
662 
663 /* Read 'sendfile()'-style from a TCP socket */
664 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
665 		  sk_read_actor_t recv_actor);
666 
667 void tcp_initialize_rcv_mss(struct sock *sk);
668 
669 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
670 int tcp_mss_to_mtu(struct sock *sk, int mss);
671 void tcp_mtup_init(struct sock *sk);
672 
673 static inline void tcp_bound_rto(const struct sock *sk)
674 {
675 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
676 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
677 }
678 
679 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
680 {
681 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
682 }
683 
684 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
685 {
686 	/* mptcp hooks are only on the slow path */
687 	if (sk_is_mptcp((struct sock *)tp))
688 		return;
689 
690 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
691 			       ntohl(TCP_FLAG_ACK) |
692 			       snd_wnd);
693 }
694 
695 static inline void tcp_fast_path_on(struct tcp_sock *tp)
696 {
697 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
698 }
699 
700 static inline void tcp_fast_path_check(struct sock *sk)
701 {
702 	struct tcp_sock *tp = tcp_sk(sk);
703 
704 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
705 	    tp->rcv_wnd &&
706 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
707 	    !tp->urg_data)
708 		tcp_fast_path_on(tp);
709 }
710 
711 /* Compute the actual rto_min value */
712 static inline u32 tcp_rto_min(struct sock *sk)
713 {
714 	const struct dst_entry *dst = __sk_dst_get(sk);
715 	u32 rto_min = inet_csk(sk)->icsk_rto_min;
716 
717 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
718 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
719 	return rto_min;
720 }
721 
722 static inline u32 tcp_rto_min_us(struct sock *sk)
723 {
724 	return jiffies_to_usecs(tcp_rto_min(sk));
725 }
726 
727 static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
728 {
729 	return dst_metric_locked(dst, RTAX_CC_ALGO);
730 }
731 
732 /* Minimum RTT in usec. ~0 means not available. */
733 static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
734 {
735 	return minmax_get(&tp->rtt_min);
736 }
737 
738 /* Compute the actual receive window we are currently advertising.
739  * Rcv_nxt can be after the window if our peer pushes more data
740  * than the offered window.
741  */
742 static inline u32 tcp_receive_window(const struct tcp_sock *tp)
743 {
744 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
745 
746 	if (win < 0)
747 		win = 0;
748 	return (u32) win;
749 }
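/* Example: rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1300 leaves a
 * 200 byte window; had the peer pushed past the advertised window so that
 * rcv_nxt exceeded rcv_wup + rcv_wnd, the negative result is clamped to 0.
 */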
750 
751 /* Choose a new window, without checks for shrinking, and without
752  * scaling applied to the result.  The caller does these things
753  * if necessary.  This is a "raw" window selection.
754  */
755 u32 __tcp_select_window(struct sock *sk);
756 
757 void tcp_send_window_probe(struct sock *sk);
758 
759 /* TCP uses 32bit jiffies to save some space.
760  * Note that this is different from tcp_time_stamp, which
761  * historically has been the same until linux-4.13.
762  */
763 #define tcp_jiffies32 ((u32)jiffies)
764 
765 /*
766  * Deliver a 32bit value for TCP timestamp option (RFC 7323)
767  * It is no longer tied to jiffies, but to a 1 ms clock.
768  * Note: double check if you want to use tcp_jiffies32 instead of this.
769  */
770 #define TCP_TS_HZ	1000
771 
772 static inline u64 tcp_clock_ns(void)
773 {
774 	return ktime_get_ns();
775 }
776 
777 static inline u64 tcp_clock_us(void)
778 {
779 	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
780 }
781 
782 /* This should only be used in contexts where tp->tcp_mstamp is up to date */
783 static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
784 {
785 	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
786 }
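/* Example: with TCP_TS_HZ = 1000, USEC_PER_SEC / TCP_TS_HZ = 1000, so the
 * microsecond tcp_mstamp is simply converted to a millisecond TSval.
 */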
787 
788 /* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
789 static inline u64 tcp_ns_to_ts(u64 ns)
790 {
791 	return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
792 }
793 
794 /* Could use tcp_clock_us() / 1000, but this version uses a single divide */
795 static inline u32 tcp_time_stamp_raw(void)
796 {
797 	return tcp_ns_to_ts(tcp_clock_ns());
798 }
799 
800 void tcp_mstamp_refresh(struct tcp_sock *tp);
801 
802 static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
803 {
804 	return max_t(s64, t1 - t0, 0);
805 }
806 
807 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
808 {
809 	return tcp_ns_to_ts(skb->skb_mstamp_ns);
810 }
811 
812 /* provide the departure time in us unit */
813 static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
814 {
815 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
816 }
817 
818 
819 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
820 
821 #define TCPHDR_FIN 0x01
822 #define TCPHDR_SYN 0x02
823 #define TCPHDR_RST 0x04
824 #define TCPHDR_PSH 0x08
825 #define TCPHDR_ACK 0x10
826 #define TCPHDR_URG 0x20
827 #define TCPHDR_ECE 0x40
828 #define TCPHDR_CWR 0x80
829 
830 #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
831 
832 /* This is what the send packet queuing engine uses to pass
833  * TCP per-packet control information to the transmission code.
834  * We also store the host-order sequence numbers in here.
835  * This is 44 bytes if IPV6 is enabled.
836  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
837  */
838 struct tcp_skb_cb {
839 	__u32		seq;		/* Starting sequence number	*/
840 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
841 	union {
842 		/* Note : tcp_tw_isn is used in input path only
843 		 *	  (isn chosen by tcp_timewait_state_process())
844 		 *
845 		 * 	  tcp_gso_segs/size are used in write queue only,
846 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
847 		 */
848 		__u32		tcp_tw_isn;
849 		struct {
850 			u16	tcp_gso_segs;
851 			u16	tcp_gso_size;
852 		};
853 	};
854 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
855 
856 	__u8		sacked;		/* State flags for SACK.	*/
857 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
858 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
859 #define TCPCB_LOST		0x04	/* SKB is lost			*/
860 #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
861 #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp_ns)	*/
862 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
863 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
864 				TCPCB_REPAIRED)
865 
866 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
867 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
868 			eor:1,		/* Is skb MSG_EOR marked? */
869 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
870 			unused:5;
871 	__u32		ack_seq;	/* Sequence number ACK'd	*/
872 	union {
873 		struct {
874 			/* There is space for up to 24 bytes */
875 			__u32 in_flight:30,/* Bytes in flight at transmit */
876 			      is_app_limited:1, /* cwnd not fully used? */
877 			      unused:1;
878 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
879 			__u32 delivered;
880 			/* start of send pipeline phase */
881 			u64 first_tx_mstamp;
882 			/* when we reached the "delivered" count */
883 			u64 delivered_mstamp;
884 		} tx;   /* only used for outgoing skbs */
885 		union {
886 			struct inet_skb_parm	h4;
887 #if IS_ENABLED(CONFIG_IPV6)
888 			struct inet6_skb_parm	h6;
889 #endif
890 		} header;	/* For incoming skbs */
891 		struct {
892 			__u32 flags;
893 			struct sock *sk_redir;
894 			void *data_end;
895 		} bpf;
896 	};
897 };
898 
899 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
900 
901 static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
902 {
903 	TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
904 }
905 
906 static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
907 {
908 	return TCP_SKB_CB(skb)->bpf.flags & BPF_F_INGRESS;
909 }
910 
911 static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
912 {
913 	return TCP_SKB_CB(skb)->bpf.sk_redir;
914 }
915 
916 static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
917 {
918 	TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
919 }
920 
921 extern const struct inet_connection_sock_af_ops ipv4_specific;
922 
923 #if IS_ENABLED(CONFIG_IPV6)
924 /* This is the variant of inet6_iif() that must be used by TCP,
925  * as TCP moves IP6CB into a different location in skb->cb[]
926  */
927 static inline int tcp_v6_iif(const struct sk_buff *skb)
928 {
929 	return TCP_SKB_CB(skb)->header.h6.iif;
930 }
931 
932 static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
933 {
934 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
935 
936 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
937 }
938 
939 /* TCP_SKB_CB reference means this can not be used from early demux */
940 static inline int tcp_v6_sdif(const struct sk_buff *skb)
941 {
942 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
943 	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
944 		return TCP_SKB_CB(skb)->header.h6.iif;
945 #endif
946 	return 0;
947 }
948 
949 extern const struct inet_connection_sock_af_ops ipv6_specific;
950 
951 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
952 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
953 void tcp_v6_early_demux(struct sk_buff *skb);
954 
955 #endif
956 
957 /* TCP_SKB_CB reference means this can not be used from early demux */
958 static inline int tcp_v4_sdif(struct sk_buff *skb)
959 {
960 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
961 	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
962 		return TCP_SKB_CB(skb)->header.h4.iif;
963 #endif
964 	return 0;
965 }
966 
967 /* Due to TSO, an SKB can be composed of multiple actual
968  * packets.  To keep these tracked properly, we use this.
969  */
970 static inline int tcp_skb_pcount(const struct sk_buff *skb)
971 {
972 	return TCP_SKB_CB(skb)->tcp_gso_segs;
973 }
974 
975 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
976 {
977 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
978 }
979 
980 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
981 {
982 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
983 }
984 
985 /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
986 static inline int tcp_skb_mss(const struct sk_buff *skb)
987 {
988 	return TCP_SKB_CB(skb)->tcp_gso_size;
989 }
990 
991 static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
992 {
993 	return likely(!TCP_SKB_CB(skb)->eor);
994 }
995 
996 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
997 					const struct sk_buff *from)
998 {
999 	return likely(tcp_skb_can_collapse_to(to) &&
1000 		      mptcp_skb_can_collapse(to, from));
1001 }
1002 
1003 /* Events passed to congestion control interface */
1004 enum tcp_ca_event {
1005 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
1006 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
1007 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
1008 	CA_EVENT_LOSS,		/* loss timeout */
1009 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
1010 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
1011 };
1012 
1013 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1014 enum tcp_ca_ack_event_flags {
1015 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
1016 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
1017 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
1018 };
1019 
1020 /*
1021  * Interface for adding new TCP congestion control handlers
1022  */
1023 #define TCP_CA_NAME_MAX	16
1024 #define TCP_CA_MAX	128
1025 #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
1026 
1027 #define TCP_CA_UNSPEC	0
1028 
1029 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1030 #define TCP_CONG_NON_RESTRICTED 0x1
1031 /* Requires ECN/ECT set on all packets */
1032 #define TCP_CONG_NEEDS_ECN	0x2
1033 #define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
1034 
1035 union tcp_cc_info;
1036 
1037 struct ack_sample {
1038 	u32 pkts_acked;
1039 	s32 rtt_us;
1040 	u32 in_flight;
1041 };
1042 
1043 /* A rate sample measures the number of (original/retransmitted) data
1044  * packets delivered "delivered" over an interval of time "interval_us".
1045  * The tcp_rate.c code fills in the rate sample, and congestion
1046  * control modules that define a cong_control function to run at the end
1047  * of ACK processing can optionally choose to consult this sample when
1048  * setting cwnd and pacing rate.
1049  * A sample is invalid if "delivered" or "interval_us" is negative.
1050  */
1051 struct rate_sample {
1052 	u64  prior_mstamp; /* starting timestamp for interval */
1053 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
1054 	s32  delivered;		/* number of packets delivered over interval */
1055 	long interval_us;	/* time for tp->delivered to incr "delivered" */
1056 	u32 snd_interval_us;	/* snd interval for delivered packets */
1057 	u32 rcv_interval_us;	/* rcv interval for delivered packets */
1058 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
1059 	int  losses;		/* number of packets marked lost upon ACK */
1060 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
1061 	u32  prior_in_flight;	/* in flight before this ACK */
1062 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
1063 	bool is_retrans;	/* is sample from retransmission? */
1064 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
1065 };
1066 
1067 struct tcp_congestion_ops {
1068 	struct list_head	list;
1069 	u32 key;
1070 	u32 flags;
1071 
1072 	/* initialize private data (optional) */
1073 	void (*init)(struct sock *sk);
1074 	/* cleanup private data  (optional) */
1075 	void (*release)(struct sock *sk);
1076 
1077 	/* return slow start threshold (required) */
1078 	u32 (*ssthresh)(struct sock *sk);
1079 	/* do new cwnd calculation (required) */
1080 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1081 	/* call before changing ca_state (optional) */
1082 	void (*set_state)(struct sock *sk, u8 new_state);
1083 	/* call when cwnd event occurs (optional) */
1084 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1085 	/* call when ack arrives (optional) */
1086 	void (*in_ack_event)(struct sock *sk, u32 flags);
1087 	/* new value of cwnd after loss (required) */
1088 	u32  (*undo_cwnd)(struct sock *sk);
1089 	/* hook for packet ack accounting (optional) */
1090 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1091 	/* override sysctl_tcp_min_tso_segs */
1092 	u32 (*min_tso_segs)(struct sock *sk);
1093 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
1094 	u32 (*sndbuf_expand)(struct sock *sk);
1095 	/* call when packets are delivered to update cwnd and pacing rate,
1096 	 * after all the ca_state processing. (optional)
1097 	 */
1098 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
1099 	/* get info for inet_diag (optional) */
1100 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1101 			   union tcp_cc_info *info);
1102 
1103 	char 		name[TCP_CA_NAME_MAX];
1104 	struct module 	*owner;
1105 };
1106 
1107 int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1108 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1109 
1110 void tcp_assign_congestion_control(struct sock *sk);
1111 void tcp_init_congestion_control(struct sock *sk);
1112 void tcp_cleanup_congestion_control(struct sock *sk);
1113 int tcp_set_default_congestion_control(struct net *net, const char *name);
1114 void tcp_get_default_congestion_control(struct net *net, char *name);
1115 void tcp_get_available_congestion_control(char *buf, size_t len);
1116 void tcp_get_allowed_congestion_control(char *buf, size_t len);
1117 int tcp_set_allowed_congestion_control(char *allowed);
1118 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1119 			       bool cap_net_admin);
1120 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1121 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1122 
1123 u32 tcp_reno_ssthresh(struct sock *sk);
1124 u32 tcp_reno_undo_cwnd(struct sock *sk);
1125 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1126 extern struct tcp_congestion_ops tcp_reno;
1127 
1128 struct tcp_congestion_ops *tcp_ca_find(const char *name);
1129 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1130 u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1131 #ifdef CONFIG_INET
1132 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1133 #else
1134 static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1135 {
1136 	return NULL;
1137 }
1138 #endif
1139 
1140 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1141 {
1142 	const struct inet_connection_sock *icsk = inet_csk(sk);
1143 
1144 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1145 }
1146 
1147 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1148 {
1149 	struct inet_connection_sock *icsk = inet_csk(sk);
1150 
1151 	if (icsk->icsk_ca_ops->set_state)
1152 		icsk->icsk_ca_ops->set_state(sk, ca_state);
1153 	icsk->icsk_ca_state = ca_state;
1154 }
1155 
1156 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1157 {
1158 	const struct inet_connection_sock *icsk = inet_csk(sk);
1159 
1160 	if (icsk->icsk_ca_ops->cwnd_event)
1161 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1162 }
1163 
1164 /* From tcp_rate.c */
1165 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1166 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1167 			    struct rate_sample *rs);
1168 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1169 		  bool is_sack_reneg, struct rate_sample *rs);
1170 void tcp_rate_check_app_limited(struct sock *sk);
1171 
1172 /* These functions determine how the current flow behaves in respect of SACK
1173  * handling. SACK is negotiated with the peer, and therefore it can vary
1174  * between different flows.
1175  *
1176  * tcp_is_sack - SACK enabled
1177  * tcp_is_reno - No SACK
1178  */
1179 static inline int tcp_is_sack(const struct tcp_sock *tp)
1180 {
1181 	return likely(tp->rx_opt.sack_ok);
1182 }
1183 
1184 static inline bool tcp_is_reno(const struct tcp_sock *tp)
1185 {
1186 	return !tcp_is_sack(tp);
1187 }
1188 
1189 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1190 {
1191 	return tp->sacked_out + tp->lost_out;
1192 }
1193 
1194 /* This determines how many packets are "in the network" to the best
1195  * of our knowledge.  In many cases it is conservative, but where
1196  * detailed information is available from the receiver (via SACK
1197  * blocks etc.) we can make more aggressive calculations.
1198  *
1199  * Use this for decisions involving congestion control, use just
1200  * tp->packets_out to determine if the send queue is empty or not.
1201  *
1202  * Read this equation as:
1203  *
1204  *	"Packets sent once on transmission queue" MINUS
1205  *	"Packets left network, but not honestly ACKed yet" PLUS
1206  *	"Packets fast retransmitted"
1207  */
1208 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1209 {
1210 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1211 }
1212 
1213 #define TCP_INFINITE_SSTHRESH	0x7fffffff
1214 
1215 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1216 {
1217 	return tp->snd_cwnd < tp->snd_ssthresh;
1218 }
1219 
1220 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1221 {
1222 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1223 }
1224 
1225 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1226 {
1227 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1228 	       (1 << inet_csk(sk)->icsk_ca_state);
1229 }
1230 
1231 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1232  * The exception is cwnd reduction phase, when cwnd is decreasing towards
1233  * ssthresh.
1234  */
1235 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1236 {
1237 	const struct tcp_sock *tp = tcp_sk(sk);
1238 
1239 	if (tcp_in_cwnd_reduction(sk))
1240 		return tp->snd_ssthresh;
1241 	else
1242 		return max(tp->snd_ssthresh,
1243 			   ((tp->snd_cwnd >> 1) +
1244 			    (tp->snd_cwnd >> 2)));
1245 }
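/* Example: outside cwnd reduction with snd_cwnd = 40, the expression
 * (cwnd >> 1) + (cwnd >> 2) = 20 + 10 = 30, i.e. ssthresh is raised to at
 * least three quarters of the current cwnd.
 */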
1246 
1247 /* Use define here intentionally to get WARN_ON location shown at the caller */
1248 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1249 
1250 void tcp_enter_cwr(struct sock *sk);
1251 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1252 
1253 /* The maximum number of MSS of available cwnd for which TSO defers
1254  * sending if not using sysctl_tcp_tso_win_divisor.
1255  */
1256 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1257 {
1258 	return 3;
1259 }
1260 
1261 /* Returns end sequence number of the receiver's advertised window */
1262 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1263 {
1264 	return tp->snd_una + tp->snd_wnd;
1265 }
1266 
1267 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1268  * flexible approach. The RFC suggests cwnd should not be raised unless
1269  * it was fully used previously. And that's exactly what we do in
1270  * congestion avoidance mode. But in slow start we allow cwnd to grow
1271  * as long as the application has used half the cwnd.
1272  * Example :
1273  *    cwnd is 10 (IW10), but application sends 9 frames.
1274  *    We allow cwnd to reach 18 when all frames are ACKed.
1275  * This check is safe because it's as aggressive as slow start which already
1276  * risks 100% overshoot. The advantage is that we discourage applications
1277  * from sending filler packets or data to artificially blow up the cwnd
1278  * usage, and allow application-limited processes to probe bw more aggressively.
1279  */
1280 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1281 {
1282 	const struct tcp_sock *tp = tcp_sk(sk);
1283 
1284 	if (tp->is_cwnd_limited)
1285 		return true;
1286 
1287 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1288 	if (tcp_in_slow_start(tp))
1289 		return tp->snd_cwnd < 2 * tp->max_packets_out;
1290 
1291 	return false;
1292 }
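/* Example matching the comment above: with IW10 and only 9 frames sent,
 * max_packets_out is 9, so slow start keeps reporting the flow as cwnd
 * limited until snd_cwnd reaches 2 * 9 = 18.
 */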
1293 
1294 /* BBR congestion control needs pacing.
1295  * Same remark for SO_MAX_PACING_RATE.
1296  * sch_fq packet scheduler is efficiently handling pacing,
1297  * but is not always installed/used.
1298  * Return true if TCP stack should pace packets itself.
1299  */
1300 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1301 {
1302 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1303 }
1304 
1305 /* Estimates in how many jiffies next packet for this flow can be sent.
1306  * Scheduling a retransmit timer too early would be silly.
1307  */
1308 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1309 {
1310 	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1311 
1312 	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1313 }
1314 
1315 static inline void tcp_reset_xmit_timer(struct sock *sk,
1316 					const int what,
1317 					unsigned long when,
1318 					const unsigned long max_when)
1319 {
1320 	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
1321 				  max_when);
1322 }
1323 
1324 /* Something is really bad, we could not queue an additional packet,
1325  * because qdisc is full or receiver sent a 0 window, or we are paced.
1326  * We do not want to add fuel to the fire, or abort too early,
1327  * so make sure the timer we arm now is at least 200ms in the future,
1328  * regardless of current icsk_rto value (as it could be ~2ms)
1329  */
1330 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1331 {
1332 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1333 }
1334 
1335 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1336 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1337 					    unsigned long max_when)
1338 {
1339 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
1340 
1341 	return (unsigned long)min_t(u64, when, max_when);
1342 }
1343 
1344 static inline void tcp_check_probe_timer(struct sock *sk)
1345 {
1346 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1347 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1348 				     tcp_probe0_base(sk), TCP_RTO_MAX);
1349 }
1350 
1351 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1352 {
1353 	tp->snd_wl1 = seq;
1354 }
1355 
1356 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1357 {
1358 	tp->snd_wl1 = seq;
1359 }
1360 
1361 /*
1362  * Calculate(/check) TCP checksum
1363  */
1364 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1365 				   __be32 daddr, __wsum base)
1366 {
1367 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1368 }
1369 
1370 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1371 {
1372 	return !skb_csum_unnecessary(skb) &&
1373 		__skb_checksum_complete(skb);
1374 }
1375 
1376 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1377 int tcp_filter(struct sock *sk, struct sk_buff *skb);
1378 void tcp_set_state(struct sock *sk, int state);
1379 void tcp_done(struct sock *sk);
1380 int tcp_abort(struct sock *sk, int err);
1381 
1382 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1383 {
1384 	rx_opt->dsack = 0;
1385 	rx_opt->num_sacks = 0;
1386 }
1387 
1388 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1389 
1390 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1391 {
1392 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1393 	struct tcp_sock *tp = tcp_sk(sk);
1394 	s32 delta;
1395 
1396 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1397 	    tp->packets_out || ca_ops->cong_control)
1398 		return;
1399 	delta = tcp_jiffies32 - tp->lsndtime;
1400 	if (delta > inet_csk(sk)->icsk_rto)
1401 		tcp_cwnd_restart(sk, delta);
1402 }
1403 
1404 /* Determine a window scaling and initial window to offer. */
1405 void tcp_select_initial_window(const struct sock *sk, int __space,
1406 			       __u32 mss, __u32 *rcv_wnd,
1407 			       __u32 *window_clamp, int wscale_ok,
1408 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1409 
1410 static inline int tcp_win_from_space(const struct sock *sk, int space)
1411 {
1412 	int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
1413 
1414 	return tcp_adv_win_scale <= 0 ?
1415 		(space>>(-tcp_adv_win_scale)) :
1416 		space - (space>>tcp_adv_win_scale);
1417 }
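/* Example: tcp_adv_win_scale = 2 advertises space - space/4 = 3/4 of the
 * buffer as window, whereas a negative value such as -2 advertises only
 * space/4.
 */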
1418 
1419 /* Note: caller must be prepared to deal with negative returns */
1420 static inline int tcp_space(const struct sock *sk)
1421 {
1422 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1423 				  READ_ONCE(sk->sk_backlog.len) -
1424 				  atomic_read(&sk->sk_rmem_alloc));
1425 }
1426 
1427 static inline int tcp_full_space(const struct sock *sk)
1428 {
1429 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1430 }
1431 
1432 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1433 
1434 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1435  * If 87.5 % (7/8) of the space has been consumed, we want to override
1436  * SO_RCVLOWAT constraint, since we are receiving skbs with too small
1437  * len/truesize ratio.
1438  */
1439 static inline bool tcp_rmem_pressure(const struct sock *sk)
1440 {
1441 	int rcvbuf, threshold;
1442 
1443 	if (tcp_under_memory_pressure(sk))
1444 		return true;
1445 
1446 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1447 	threshold = rcvbuf - (rcvbuf >> 3);
1448 
1449 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1450 }
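/* Example: with sk_rcvbuf at 64KB the threshold is 64KB - 8KB = 56KB,
 * i.e. pressure is reported once 7/8 of the receive buffer is in use.
 */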
1451 
1452 extern void tcp_openreq_init_rwin(struct request_sock *req,
1453 				  const struct sock *sk_listener,
1454 				  const struct dst_entry *dst);
1455 
1456 void tcp_enter_memory_pressure(struct sock *sk);
1457 void tcp_leave_memory_pressure(struct sock *sk);
1458 
1459 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1460 {
1461 	struct net *net = sock_net((struct sock *)tp);
1462 	int val;
1463 
1464 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1465 	 * and do_tcp_setsockopt().
1466 	 */
1467 	val = READ_ONCE(tp->keepalive_intvl);
1468 
1469 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1470 }
1471 
1472 static inline int keepalive_time_when(const struct tcp_sock *tp)
1473 {
1474 	struct net *net = sock_net((struct sock *)tp);
1475 	int val;
1476 
1477 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1478 	val = READ_ONCE(tp->keepalive_time);
1479 
1480 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1481 }
1482 
keepalive_probes(const struct tcp_sock * tp)1483 static inline int keepalive_probes(const struct tcp_sock *tp)
1484 {
1485 	struct net *net = sock_net((struct sock *)tp);
1486 	int val;
1487 
1488 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1489 	 * and do_tcp_setsockopt().
1490 	 */
1491 	val = READ_ONCE(tp->keepalive_probes);
1492 
1493 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1494 }
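/* The three helpers above fall back to the per-netns sysctls whenever the
 * per-socket value is 0.  With the usual kernel defaults this works out to
 * roughly: 7200 s of idle time (tcp_keepalive_time) before probing starts,
 * a probe every 75 s (tcp_keepalive_intvl), and 9 unanswered probes
 * (tcp_keepalive_probes) before the connection is declared dead.
 */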
1495 
1496 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1497 {
1498 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1499 
1500 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1501 			  tcp_jiffies32 - tp->rcv_tstamp);
1502 }
1503 
1504 static inline int tcp_fin_time(const struct sock *sk)
1505 {
1506 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1507 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1508 	const int rto = inet_csk(sk)->icsk_rto;
1509 
1510 	if (fin_timeout < (rto << 2) - (rto >> 1))
1511 		fin_timeout = (rto << 2) - (rto >> 1);
1512 
1513 	return fin_timeout;
1514 }
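/* Note: the clamp above keeps FIN-WAIT-2 alive for at least
 * (rto << 2) - (rto >> 1) = 3.5 * RTO.  For example (hypothetical timing),
 * with an RTO of 200 ms the effective fin_timeout never drops below 700 ms,
 * even if linger2 or tcp_fin_timeout is configured lower.
 */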
1515 
1516 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1517 				  int paws_win)
1518 {
1519 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1520 		return true;
1521 	if (unlikely(!time_before32(ktime_get_seconds(),
1522 				    rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
1523 		return true;
1524 	/*
1525 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1526 	 * while subsequent TCP messages carry valid values. Ignore a 0 value,
1527 	 * or else a 'negative' tsval might prevent us from accepting their packets.
1528 	 */
1529 	if (!rx_opt->ts_recent)
1530 		return true;
1531 	return false;
1532 }
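/* Illustrative example (hypothetical values): with ts_recent = 1000 and an
 * incoming rcv_tsval = 998, (s32)(1000 - 998) = 2, so for paws_win = 0 the
 * first test fails and the segment is only accepted if ts_recent_stamp is
 * older than TCP_PAWS_24DAYS or ts_recent is 0 (peer never sent a
 * timestamp).  An rcv_tsval of 1005 would pass immediately, since the
 * signed difference is negative.
 */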
1533 
1534 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1535 				   int rst)
1536 {
1537 	if (tcp_paws_check(rx_opt, 0))
1538 		return false;
1539 
1540 	/* RST segments are not recommended to carry timestamps,
1541 	   and, if they do, it is recommended to ignore PAWS because
1542 	   "their cleanup function should take precedence over timestamps."
1543 	   That is certainly a mistake. To relax this constraint, one must
1544 	   understand the reason for it: if the peer reboots, its clock may go
1545 	   out of sync and half-open connections will never be reset.
1546 	   Actually, the problem would not exist if all implementations
1547 	   followed the draft about maintaining clocks across reboots.
1548 	   Linux-2.2 DOES NOT!
1549 
1550 	   However, we can relax time bounds for RST segments to MSL.
1551 	 */
1552 	if (rst && !time_before32(ktime_get_seconds(),
1553 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1554 		return false;
1555 	return true;
1556 }
1557 
1558 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1559 			  int mib_idx, u32 *last_oow_ack_time);
1560 
1561 static inline void tcp_mib_init(struct net *net)
1562 {
1563 	/* See RFC 2012 */
1564 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1565 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1566 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1567 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1568 }
1569 
1570 /* from STCP */
1571 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1572 {
1573 	tp->lost_skb_hint = NULL;
1574 }
1575 
1576 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1577 {
1578 	tcp_clear_retrans_hints_partial(tp);
1579 	tp->retransmit_skb_hint = NULL;
1580 }
1581 
1582 union tcp_md5_addr {
1583 	struct in_addr  a4;
1584 #if IS_ENABLED(CONFIG_IPV6)
1585 	struct in6_addr	a6;
1586 #endif
1587 };
1588 
1589 /* - key database */
1590 struct tcp_md5sig_key {
1591 	struct hlist_node	node;
1592 	u8			keylen;
1593 	u8			family; /* AF_INET or AF_INET6 */
1594 	u8			prefixlen;
1595 	union tcp_md5_addr	addr;
1596 	int			l3index; /* set if key added with L3 scope */
1597 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1598 	struct rcu_head		rcu;
1599 };
1600 
1601 /* - sock block */
1602 struct tcp_md5sig_info {
1603 	struct hlist_head	head;
1604 	struct rcu_head		rcu;
1605 };
1606 
1607 /* - pseudo header */
1608 struct tcp4_pseudohdr {
1609 	__be32		saddr;
1610 	__be32		daddr;
1611 	__u8		pad;
1612 	__u8		protocol;
1613 	__be16		len;
1614 };
1615 
1616 struct tcp6_pseudohdr {
1617 	struct in6_addr	saddr;
1618 	struct in6_addr daddr;
1619 	__be32		len;
1620 	__be32		protocol;	/* including padding */
1621 };
1622 
1623 union tcp_md5sum_block {
1624 	struct tcp4_pseudohdr ip4;
1625 #if IS_ENABLED(CONFIG_IPV6)
1626 	struct tcp6_pseudohdr ip6;
1627 #endif
1628 };
1629 
1630 /* - pool: digest algorithm, hash description and scratch buffer */
1631 struct tcp_md5sig_pool {
1632 	struct ahash_request	*md5_req;
1633 	void			*scratch;
1634 };
1635 
1636 /* - functions */
1637 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1638 			const struct sock *sk, const struct sk_buff *skb);
1639 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1640 		   int family, u8 prefixlen, int l3index,
1641 		   const u8 *newkey, u8 newkeylen, gfp_t gfp);
1642 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1643 		   int family, u8 prefixlen, int l3index);
1644 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1645 					 const struct sock *addr_sk);
1646 
1647 #ifdef CONFIG_TCP_MD5SIG
1648 #include <linux/jump_label.h>
1649 extern struct static_key_false tcp_md5_needed;
1650 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1651 					   const union tcp_md5_addr *addr,
1652 					   int family);
1653 static inline struct tcp_md5sig_key *
1654 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1655 		  const union tcp_md5_addr *addr, int family)
1656 {
1657 	if (!static_branch_unlikely(&tcp_md5_needed))
1658 		return NULL;
1659 	return __tcp_md5_do_lookup(sk, l3index, addr, family);
1660 }
1661 
1662 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1663 #else
1664 static inline struct tcp_md5sig_key *
1665 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1666 		  const union tcp_md5_addr *addr, int family)
1667 {
1668 	return NULL;
1669 }
1670 #define tcp_twsk_md5_key(twsk)	NULL
1671 #endif
1672 
1673 bool tcp_alloc_md5sig_pool(void);
1674 
1675 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1676 static inline void tcp_put_md5sig_pool(void)
1677 {
1678 	local_bh_enable();
1679 }
1680 
1681 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1682 			  unsigned int header_len);
1683 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1684 		     const struct tcp_md5sig_key *key);
1685 
1686 /* From tcp_fastopen.c */
1687 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1688 			    struct tcp_fastopen_cookie *cookie);
1689 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1690 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1691 			    u16 try_exp);
1692 struct tcp_fastopen_request {
1693 	/* Fast Open cookie. Size 0 means a cookie request */
1694 	struct tcp_fastopen_cookie	cookie;
1695 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1696 	size_t				size;
1697 	int				copied;	/* queued in tcp_connect() */
1698 	struct ubuf_info		*uarg;
1699 };
1700 void tcp_free_fastopen_req(struct tcp_sock *tp);
1701 void tcp_fastopen_destroy_cipher(struct sock *sk);
1702 void tcp_fastopen_ctx_destroy(struct net *net);
1703 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1704 			      void *primary_key, void *backup_key);
1705 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
1706 			    u64 *key);
1707 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1708 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1709 			      struct request_sock *req,
1710 			      struct tcp_fastopen_cookie *foc,
1711 			      const struct dst_entry *dst);
1712 void tcp_fastopen_init_key_once(struct net *net);
1713 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1714 			     struct tcp_fastopen_cookie *cookie);
1715 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1716 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
1717 #define TCP_FASTOPEN_KEY_MAX 2
1718 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
1719 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
1720 
1721 /* Fastopen key context */
1722 struct tcp_fastopen_context {
1723 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
1724 	int		num;
1725 	struct rcu_head	rcu;
1726 };
1727 
1728 extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1729 void tcp_fastopen_active_disable(struct sock *sk);
1730 bool tcp_fastopen_active_should_disable(struct sock *sk);
1731 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1732 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1733 
1734 /* Caller needs to wrap with rcu_read_(un)lock() */
1735 static inline
1736 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
1737 {
1738 	struct tcp_fastopen_context *ctx;
1739 
1740 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
1741 	if (!ctx)
1742 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
1743 	return ctx;
1744 }
1745 
1746 static inline
1747 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
1748 			       const struct tcp_fastopen_cookie *orig)
1749 {
1750 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
1751 	    orig->len == foc->len &&
1752 	    !memcmp(orig->val, foc->val, foc->len))
1753 		return true;
1754 	return false;
1755 }
1756 
1757 static inline
1758 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
1759 {
1760 	return ctx->num;
1761 }
1762 
1763 /* Latencies incurred by various limits for a sender. They are
1764  * chronograph-like stats that are mutually exclusive.
1765  */
1766 enum tcp_chrono {
1767 	TCP_CHRONO_UNSPEC,
1768 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1769 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1770 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1771 	__TCP_CHRONO_MAX,
1772 };
1773 
1774 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1775 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
1776 
1777 /* This helper is needed, because skb->tcp_tsorted_anchor uses
1778  * the same memory storage as skb->destructor/_skb_refdst
1779  */
1780 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1781 {
1782 	skb->destructor = NULL;
1783 	skb->_skb_refdst = 0UL;
1784 }
1785 
1786 #define tcp_skb_tsorted_save(skb) {		\
1787 	unsigned long _save = skb->_skb_refdst;	\
1788 	skb->_skb_refdst = 0UL;
1789 
1790 #define tcp_skb_tsorted_restore(skb)		\
1791 	skb->_skb_refdst = _save;		\
1792 }
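/* The two macros above deliberately open and close a C block, so a caller
 * is expected to use them as brackets around code that must not see the
 * aliased tcp_tsorted_anchor storage, e.g. (sketch only; the helper name
 * is hypothetical):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = some_transmit_helper(sk, skb);
 *	} tcp_skb_tsorted_restore(skb);
 *
 * skb->_skb_refdst is saved and zeroed for the duration of the block and
 * restored afterwards.
 */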
1793 
1794 void tcp_write_queue_purge(struct sock *sk);
1795 
1796 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1797 {
1798 	return skb_rb_first(&sk->tcp_rtx_queue);
1799 }
1800 
1801 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1802 {
1803 	return skb_rb_last(&sk->tcp_rtx_queue);
1804 }
1805 
1806 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1807 {
1808 	return skb_peek(&sk->sk_write_queue);
1809 }
1810 
1811 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1812 {
1813 	return skb_peek_tail(&sk->sk_write_queue);
1814 }
1815 
1816 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1817 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1818 
1819 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1820 {
1821 	return skb_peek(&sk->sk_write_queue);
1822 }
1823 
1824 static inline bool tcp_skb_is_last(const struct sock *sk,
1825 				   const struct sk_buff *skb)
1826 {
1827 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1828 }
1829 
1830 /**
1831  * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
1832  * @sk: socket
1833  *
1834  * Since the write queue can have a temporary empty skb in it,
1835  * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
1836  */
1837 static inline bool tcp_write_queue_empty(const struct sock *sk)
1838 {
1839 	const struct tcp_sock *tp = tcp_sk(sk);
1840 
1841 	return tp->write_seq == tp->snd_nxt;
1842 }
1843 
1844 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1845 {
1846 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1847 }
1848 
1849 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1850 {
1851 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1852 }
1853 
1854 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1855 {
1856 	__skb_queue_tail(&sk->sk_write_queue, skb);
1857 
1858 	/* Queue it, remembering where we must start sending. */
1859 	if (sk->sk_write_queue.next == skb)
1860 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1861 }
1862 
1863 /* Insert new before skb on the write queue of sk.  */
1864 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1865 						  struct sk_buff *skb,
1866 						  struct sock *sk)
1867 {
1868 	__skb_queue_before(&sk->sk_write_queue, skb, new);
1869 }
1870 
1871 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1872 {
1873 	tcp_skb_tsorted_anchor_cleanup(skb);
1874 	__skb_unlink(skb, &sk->sk_write_queue);
1875 }
1876 
1877 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1878 
1879 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1880 {
1881 	tcp_skb_tsorted_anchor_cleanup(skb);
1882 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1883 }
1884 
1885 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1886 {
1887 	list_del(&skb->tcp_tsorted_anchor);
1888 	tcp_rtx_queue_unlink(skb, sk);
1889 	sk_wmem_free_skb(sk, skb);
1890 }
1891 
1892 static inline void tcp_push_pending_frames(struct sock *sk)
1893 {
1894 	if (tcp_send_head(sk)) {
1895 		struct tcp_sock *tp = tcp_sk(sk);
1896 
1897 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1898 	}
1899 }
1900 
1901 /* Start sequence of the skb just after the highest skb with SACKed
1902  * bit, valid only if sacked_out > 0 or when the caller has ensured
1903  * validity itself.
1904  */
1905 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1906 {
1907 	if (!tp->sacked_out)
1908 		return tp->snd_una;
1909 
1910 	if (tp->highest_sack == NULL)
1911 		return tp->snd_nxt;
1912 
1913 	return TCP_SKB_CB(tp->highest_sack)->seq;
1914 }
1915 
1916 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1917 {
1918 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
1919 }
1920 
1921 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1922 {
1923 	return tcp_sk(sk)->highest_sack;
1924 }
1925 
1926 static inline void tcp_highest_sack_reset(struct sock *sk)
1927 {
1928 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
1929 }
1930 
1931 /* Called when old skb is about to be deleted and replaced by new skb */
1932 static inline void tcp_highest_sack_replace(struct sock *sk,
1933 					    struct sk_buff *old,
1934 					    struct sk_buff *new)
1935 {
1936 	if (old == tcp_highest_sack(sk))
1937 		tcp_sk(sk)->highest_sack = new;
1938 }
1939 
1940 /* This helper checks if socket has IP_TRANSPARENT set */
1941 static inline bool inet_sk_transparent(const struct sock *sk)
1942 {
1943 	switch (sk->sk_state) {
1944 	case TCP_TIME_WAIT:
1945 		return inet_twsk(sk)->tw_transparent;
1946 	case TCP_NEW_SYN_RECV:
1947 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1948 	}
1949 	return inet_sk(sk)->transparent;
1950 }
1951 
1952 /* Determines whether this is a thin stream (which may suffer from
1953  * increased latency). Used to trigger latency-reducing mechanisms.
1954  */
1955 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1956 {
1957 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1958 }
1959 
1960 /* /proc */
1961 enum tcp_seq_states {
1962 	TCP_SEQ_STATE_LISTENING,
1963 	TCP_SEQ_STATE_ESTABLISHED,
1964 };
1965 
1966 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
1967 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
1968 void tcp_seq_stop(struct seq_file *seq, void *v);
1969 
1970 struct tcp_seq_afinfo {
1971 	sa_family_t			family;
1972 };
1973 
1974 struct tcp_iter_state {
1975 	struct seq_net_private	p;
1976 	enum tcp_seq_states	state;
1977 	struct sock		*syn_wait_sk;
1978 	struct tcp_seq_afinfo	*bpf_seq_afinfo;
1979 	int			bucket, offset, sbucket, num;
1980 	loff_t			last_pos;
1981 };
1982 
1983 extern struct request_sock_ops tcp_request_sock_ops;
1984 extern struct request_sock_ops tcp6_request_sock_ops;
1985 
1986 void tcp_v4_destroy_sock(struct sock *sk);
1987 
1988 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1989 				netdev_features_t features);
1990 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
1991 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
1992 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
1993 INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
1994 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
1995 int tcp_gro_complete(struct sk_buff *skb);
1996 
1997 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1998 
tcp_notsent_lowat(const struct tcp_sock * tp)1999 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2000 {
2001 	struct net *net = sock_net((struct sock *)tp);
2002 	u32 val;
2003 
2004 	val = READ_ONCE(tp->notsent_lowat);
2005 
2006 	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2007 }
2008 
2009 /* @wake is one when sk_stream_write_space() calls us.
2010  * This sends EPOLLOUT only if notsent_bytes is less than half the limit.
2011  * This mimics the strategy used in sock_def_write_space().
2012  */
2013 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
2014 {
2015 	const struct tcp_sock *tp = tcp_sk(sk);
2016 	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
2017 			    READ_ONCE(tp->snd_nxt);
2018 
2019 	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
2020 }
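/* Illustrative example (hypothetical numbers): with notsent_lowat = 128 KB
 * and 100 KB still unsent, a plain poll (wake == 0) sees 100 KB < 128 KB and
 * reports the stream writable, while the wakeup path (wake == 1) evaluates
 * 200 KB < 128 KB, stays silent, and so only raises EPOLLOUT once the unsent
 * backlog drops below half the limit.
 */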
2021 
2022 #ifdef CONFIG_PROC_FS
2023 int tcp4_proc_init(void);
2024 void tcp4_proc_exit(void);
2025 #endif
2026 
2027 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2028 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2029 		     const struct tcp_request_sock_ops *af_ops,
2030 		     struct sock *sk, struct sk_buff *skb);
2031 
2032 /* TCP af-specific functions */
2033 struct tcp_sock_af_ops {
2034 #ifdef CONFIG_TCP_MD5SIG
2035 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2036 						const struct sock *addr_sk);
2037 	int		(*calc_md5_hash)(char *location,
2038 					 const struct tcp_md5sig_key *md5,
2039 					 const struct sock *sk,
2040 					 const struct sk_buff *skb);
2041 	int		(*md5_parse)(struct sock *sk,
2042 				     int optname,
2043 				     sockptr_t optval,
2044 				     int optlen);
2045 #endif
2046 };
2047 
2048 struct tcp_request_sock_ops {
2049 	u16 mss_clamp;
2050 #ifdef CONFIG_TCP_MD5SIG
2051 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2052 						 const struct sock *addr_sk);
2053 	int		(*calc_md5_hash) (char *location,
2054 					  const struct tcp_md5sig_key *md5,
2055 					  const struct sock *sk,
2056 					  const struct sk_buff *skb);
2057 #endif
2058 	void (*init_req)(struct request_sock *req,
2059 			 const struct sock *sk_listener,
2060 			 struct sk_buff *skb);
2061 #ifdef CONFIG_SYN_COOKIES
2062 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2063 				 __u16 *mss);
2064 #endif
2065 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
2066 				       const struct request_sock *req);
2067 	u32 (*init_seq)(const struct sk_buff *skb);
2068 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2069 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2070 			   struct flowi *fl, struct request_sock *req,
2071 			   struct tcp_fastopen_cookie *foc,
2072 			   enum tcp_synack_type synack_type,
2073 			   struct sk_buff *syn_skb);
2074 };
2075 
2076 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2077 #if IS_ENABLED(CONFIG_IPV6)
2078 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2079 #endif
2080 
2081 #ifdef CONFIG_SYN_COOKIES
2082 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2083 					 const struct sock *sk, struct sk_buff *skb,
2084 					 __u16 *mss)
2085 {
2086 	tcp_synq_overflow(sk);
2087 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2088 	return ops->cookie_init_seq(skb, mss);
2089 }
2090 #else
2091 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2092 					 const struct sock *sk, struct sk_buff *skb,
2093 					 __u16 *mss)
2094 {
2095 	return 0;
2096 }
2097 #endif
2098 
2099 int tcpv4_offload_init(void);
2100 
2101 void tcp_v4_init(void);
2102 void tcp_init(void);
2103 
2104 /* tcp_recovery.c */
2105 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2106 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2107 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2108 				u32 reo_wnd);
2109 extern bool tcp_rack_mark_lost(struct sock *sk);
2110 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
2111 			     u64 xmit_time);
2112 extern void tcp_rack_reo_timeout(struct sock *sk);
2113 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
2114 
2115 /* At how many usecs into the future should the RTO fire? */
2116 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2117 {
2118 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2119 	u32 rto = inet_csk(sk)->icsk_rto;
2120 	u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2121 
2122 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2123 }
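/* Illustrative example (hypothetical timings): if the head of the rtx queue
 * was last (re)transmitted 150 ms ago and icsk_rto is 200 ms, the helper
 * returns roughly 50 ms (in usecs), i.e. the RTO should fire 50 ms from now;
 * a negative result means the deadline has already passed.
 */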
2124 
2125 /*
2126  * Save and compile IPv4 options, return a pointer to the result
2127  */
2128 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2129 							 struct sk_buff *skb)
2130 {
2131 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2132 	struct ip_options_rcu *dopt = NULL;
2133 
2134 	if (opt->optlen) {
2135 		int opt_size = sizeof(*dopt) + opt->optlen;
2136 
2137 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2138 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2139 			kfree(dopt);
2140 			dopt = NULL;
2141 		}
2142 	}
2143 	return dopt;
2144 }
2145 
2146 /* locally generated TCP pure ACKs have skb->truesize == 2
2147  * (check tcp_send_ack() in net/ipv4/tcp_output.c)
2148  * This is much faster than dissecting the packet to find out.
2149  * (Think of GRE encapsulations, IPv4, IPv6, ...)
2150  */
2151 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2152 {
2153 	return skb->truesize == 2;
2154 }
2155 
2156 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2157 {
2158 	skb->truesize = 2;
2159 }
2160 
2161 static inline int tcp_inq(struct sock *sk)
2162 {
2163 	struct tcp_sock *tp = tcp_sk(sk);
2164 	int answ;
2165 
2166 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2167 		answ = 0;
2168 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2169 		   !tp->urg_data ||
2170 		   before(tp->urg_seq, tp->copied_seq) ||
2171 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2172 
2173 		answ = tp->rcv_nxt - tp->copied_seq;
2174 
2175 		/* Subtract 1, if FIN was received */
2176 		if (answ && sock_flag(sk, SOCK_DONE))
2177 			answ--;
2178 	} else {
2179 		answ = tp->urg_seq - tp->copied_seq;
2180 	}
2181 
2182 	return answ;
2183 }
2184 
2185 int tcp_peek_len(struct socket *sock);
2186 
2187 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2188 {
2189 	u16 segs_in;
2190 
2191 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2192 	tp->segs_in += segs_in;
2193 	if (skb->len > tcp_hdrlen(skb))
2194 		tp->data_segs_in += segs_in;
2195 }
2196 
2197 /*
2198  * TCP listen path runs lockless.
2199  * We forced "struct sock" to be const qualified to make sure
2200  * we don't modify one of its fields by mistake.
2201  * Here, we increment sk_drops which is an atomic_t, so we can safely
2202  * make sock writable again.
2203  */
2204 static inline void tcp_listendrop(const struct sock *sk)
2205 {
2206 	atomic_inc(&((struct sock *)sk)->sk_drops);
2207 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2208 }
2209 
2210 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2211 
2212 /*
2213  * Interface for adding Upper Level Protocols over TCP
2214  */
2215 
2216 #define TCP_ULP_NAME_MAX	16
2217 #define TCP_ULP_MAX		128
2218 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2219 
2220 struct tcp_ulp_ops {
2221 	struct list_head	list;
2222 
2223 	/* initialize ulp */
2224 	int (*init)(struct sock *sk);
2225 	/* update ulp */
2226 	void (*update)(struct sock *sk, struct proto *p,
2227 		       void (*write_space)(struct sock *sk));
2228 	/* cleanup ulp */
2229 	void (*release)(struct sock *sk);
2230 	/* diagnostic */
2231 	int (*get_info)(const struct sock *sk, struct sk_buff *skb);
2232 	size_t (*get_info_size)(const struct sock *sk);
2233 	/* clone ulp */
2234 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2235 		      const gfp_t priority);
2236 
2237 	char		name[TCP_ULP_NAME_MAX];
2238 	struct module	*owner;
2239 };
2240 int tcp_register_ulp(struct tcp_ulp_ops *type);
2241 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2242 int tcp_set_ulp(struct sock *sk, const char *name);
2243 void tcp_get_available_ulp(char *buf, size_t len);
2244 void tcp_cleanup_ulp(struct sock *sk);
2245 void tcp_update_ulp(struct sock *sk, struct proto *p,
2246 		    void (*write_space)(struct sock *sk));
2247 
2248 #define MODULE_ALIAS_TCP_ULP(name)				\
2249 	__MODULE_INFO(alias, alias_userspace, name);		\
2250 	__MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
2251 
2252 struct sk_msg;
2253 struct sk_psock;
2254 
2255 #ifdef CONFIG_BPF_STREAM_PARSER
2256 struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock);
2257 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2258 #else
2259 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2260 {
2261 }
2262 #endif /* CONFIG_BPF_STREAM_PARSER */
2263 
2264 #ifdef CONFIG_NET_SOCK_MSG
2265 int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg, u32 bytes,
2266 			  int flags);
2267 int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
2268 		      struct msghdr *msg, int len, int flags);
2269 #endif /* CONFIG_NET_SOCK_MSG */
2270 
2271 #ifdef CONFIG_CGROUP_BPF
2272 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2273 				      struct sk_buff *skb,
2274 				      unsigned int end_offset)
2275 {
2276 	skops->skb = skb;
2277 	skops->skb_data_end = skb->data + end_offset;
2278 }
2279 #else
2280 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2281 				      struct sk_buff *skb,
2282 				      unsigned int end_offset)
2283 {
2284 }
2285 #endif
2286 
2287 /* Call BPF_SOCK_OPS program that returns an int. If the return value
2288  * is < 0, then the BPF op failed (for example if the loaded BPF
2289  * program does not support the chosen operation or there is no BPF
2290  * program loaded).
2291  */
2292 #ifdef CONFIG_BPF
2293 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2294 {
2295 	struct bpf_sock_ops_kern sock_ops;
2296 	int ret;
2297 
2298 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2299 	if (sk_fullsock(sk)) {
2300 		sock_ops.is_fullsock = 1;
2301 		sock_owned_by_me(sk);
2302 	}
2303 
2304 	sock_ops.sk = sk;
2305 	sock_ops.op = op;
2306 	if (nargs > 0)
2307 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2308 
2309 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2310 	if (ret == 0)
2311 		ret = sock_ops.reply;
2312 	else
2313 		ret = -1;
2314 	return ret;
2315 }
2316 
2317 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2318 {
2319 	u32 args[2] = {arg1, arg2};
2320 
2321 	return tcp_call_bpf(sk, op, 2, args);
2322 }
2323 
2324 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2325 				    u32 arg3)
2326 {
2327 	u32 args[3] = {arg1, arg2, arg3};
2328 
2329 	return tcp_call_bpf(sk, op, 3, args);
2330 }
2331 
2332 #else
2333 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2334 {
2335 	return -EPERM;
2336 }
2337 
2338 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2339 {
2340 	return -EPERM;
2341 }
2342 
2343 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2344 				    u32 arg3)
2345 {
2346 	return -EPERM;
2347 }
2348 
2349 #endif
2350 
2351 static inline u32 tcp_timeout_init(struct sock *sk)
2352 {
2353 	int timeout;
2354 
2355 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2356 
2357 	if (timeout <= 0)
2358 		timeout = TCP_TIMEOUT_INIT;
2359 	return timeout;
2360 }
2361 
2362 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2363 {
2364 	int rwnd;
2365 
2366 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2367 
2368 	if (rwnd < 0)
2369 		rwnd = 0;
2370 	return rwnd;
2371 }
2372 
2373 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2374 {
2375 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2376 }
2377 
2378 static inline void tcp_bpf_rtt(struct sock *sk)
2379 {
2380 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2381 		tcp_call_bpf(sk, BPF_SOCK_OPS_RTT_CB, 0, NULL);
2382 }
2383 
2384 #if IS_ENABLED(CONFIG_SMC)
2385 extern struct static_key_false tcp_have_smc;
2386 #endif
2387 
2388 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2389 void clean_acked_data_enable(struct inet_connection_sock *icsk,
2390 			     void (*cad)(struct sock *sk, u32 ack_seq));
2391 void clean_acked_data_disable(struct inet_connection_sock *icsk);
2392 void clean_acked_data_flush(void);
2393 #endif
2394 
2395 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2396 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2397 				    const struct tcp_sock *tp)
2398 {
2399 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2400 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2401 }
2402 
2403 /* Compute Earliest Departure Time for some control packets
2404  * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
2405  */
2406 static inline u64 tcp_transmit_time(const struct sock *sk)
2407 {
2408 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2409 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2410 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2411 
2412 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2413 	}
2414 	return 0;
2415 }
2416 
2417 #endif	/* _TCP_H */
2418