1 /*
2  * INET		An implementation of the TCP/IP protocol suite for the LINUX
3  *		operating system.  INET is implemented using the  BSD Socket
4  *		interface as the means of communication with the user level.
5  *
6  *		Implementation of the Transmission Control Protocol(TCP).
7  *
8  *		IPv4 specific functions
9  *
10  *
11  *		code split from:
12  *		linux/ipv4/tcp.c
13  *		linux/ipv4/tcp_input.c
14  *		linux/ipv4/tcp_output.c
15  *
16  *		See tcp.c for author information
17  *
18  *	This program is free software; you can redistribute it and/or
19  *      modify it under the terms of the GNU General Public License
20  *      as published by the Free Software Foundation; either version
21  *      2 of the License, or (at your option) any later version.
22  */
23 
24 /*
25  * Changes:
26  *		David S. Miller	:	New socket lookup architecture.
27  *					This code is dedicated to John Dyson.
28  *		David S. Miller :	Change semantics of established hash,
29  *					half is devoted to TIME_WAIT sockets
30  *					and the rest go in the other half.
31  *		Andi Kleen :		Add support for syncookies and fixed
32  *					some bugs: ip options weren't passed to
33  *					the TCP layer, missed a check for an
34  *					ACK bit.
35  *		Andi Kleen :		Implemented fast path mtu discovery.
36  *	     				Fixed many serious bugs in the
37  *					request_sock handling and moved
38  *					most of it into the af independent code.
39  *					Added tail drop and some other bugfixes.
40  *					Added new listen semantics.
41  *		Mike McLagan	:	Routing by source
42  *	Juan Jose Ciarlante:		ip_dynaddr bits
43  *		Andi Kleen:		various fixes.
44  *	Vitaly E. Lavrov	:	Transparent proxy revived after year
45  *					coma.
46  *	Andi Kleen		:	Fix new listen.
47  *	Andi Kleen		:	Fix accept error reporting.
48  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
49  *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
50  *					a single port at the same time.
51  */
52 
53 #define pr_fmt(fmt) "TCP: " fmt
54 
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65 
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
77 
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
83 
84 #include <crypto/hash.h>
85 #include <linux/scatterlist.h>
86 
87 int sysctl_tcp_tw_reuse __read_mostly;
88 int sysctl_tcp_low_latency __read_mostly;
89 
90 #ifdef CONFIG_TCP_MD5SIG
91 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
92 			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
93 #endif
94 
95 struct inet_hashinfo tcp_hashinfo;
96 EXPORT_SYMBOL(tcp_hashinfo);
97 
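/* Pick the TCP initial sequence number for this connection from the
 * skb's address/port 4-tuple via secure_tcp_sequence_number().
 */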
98 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
99 {
100 	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
101 					  ip_hdr(skb)->saddr,
102 					  tcp_hdr(skb)->dest,
103 					  tcp_hdr(skb)->source);
104 }
105 
106 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
107 {
108 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
109 	struct tcp_sock *tp = tcp_sk(sk);
110 
111 	/* With PAWS, it is safe from the viewpoint
112 	   of data integrity. Even without PAWS it is safe provided sequence
113 	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.
114 
115 	   Actually, the idea is close to VJ's one, only timestamp cache is
116 	   held not per host, but per port pair and TW bucket is used as state
117 	   holder.
118 
119 	   If TW bucket has been already destroyed we fall back to VJ's scheme
120 	   and use initial timestamp retrieved from peer table.
121 	 */
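	/* Reuse is allowed only when the old TIME-WAIT socket saw timestamps
	 * and either no uniqueness check was requested (twp == NULL) or
	 * tcp_tw_reuse is set and at least one second has elapsed.
	 */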
122 	if (tcptw->tw_ts_recent_stamp &&
123 	    (!twp || (sysctl_tcp_tw_reuse &&
124 			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
125 		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
126 		if (tp->write_seq == 0)
127 			tp->write_seq = 1;
128 		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
129 		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
130 		sock_hold(sktw);
131 		return 1;
132 	}
133 
134 	return 0;
135 }
136 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
137 
138 /* This will initiate an outgoing connection. */
139 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
140 {
141 	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
142 	struct inet_sock *inet = inet_sk(sk);
143 	struct tcp_sock *tp = tcp_sk(sk);
144 	__be16 orig_sport, orig_dport;
145 	__be32 daddr, nexthop;
146 	struct flowi4 *fl4;
147 	struct rtable *rt;
148 	int err;
149 	struct ip_options_rcu *inet_opt;
150 
151 	if (addr_len < sizeof(struct sockaddr_in))
152 		return -EINVAL;
153 
154 	if (usin->sin_family != AF_INET)
155 		return -EAFNOSUPPORT;
156 
157 	nexthop = daddr = usin->sin_addr.s_addr;
158 	inet_opt = rcu_dereference_protected(inet->inet_opt,
159 					     lockdep_sock_is_held(sk));
160 	if (inet_opt && inet_opt->opt.srr) {
161 		if (!daddr)
162 			return -EINVAL;
163 		nexthop = inet_opt->opt.faddr;
164 	}
165 
166 	orig_sport = inet->inet_sport;
167 	orig_dport = usin->sin_port;
168 	fl4 = &inet->cork.fl.u.ip4;
169 	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
170 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
171 			      IPPROTO_TCP,
172 			      orig_sport, orig_dport, sk);
173 	if (IS_ERR(rt)) {
174 		err = PTR_ERR(rt);
175 		if (err == -ENETUNREACH)
176 			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
177 		return err;
178 	}
179 
180 	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
181 		ip_rt_put(rt);
182 		return -ENETUNREACH;
183 	}
184 
185 	if (!inet_opt || !inet_opt->opt.srr)
186 		daddr = fl4->daddr;
187 
188 	if (!inet->inet_saddr)
189 		inet->inet_saddr = fl4->saddr;
190 	sk_rcv_saddr_set(sk, inet->inet_saddr);
191 
192 	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
193 		/* Reset inherited state */
194 		tp->rx_opt.ts_recent	   = 0;
195 		tp->rx_opt.ts_recent_stamp = 0;
196 		if (likely(!tp->repair))
197 			tp->write_seq	   = 0;
198 	}
199 
200 	if (tcp_death_row.sysctl_tw_recycle &&
201 	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
202 		tcp_fetch_timewait_stamp(sk, &rt->dst);
203 
204 	inet->inet_dport = usin->sin_port;
205 	sk_daddr_set(sk, daddr);
206 
207 	inet_csk(sk)->icsk_ext_hdr_len = 0;
208 	if (inet_opt)
209 		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
210 
211 	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
212 
213 	/* Socket identity is still unknown (sport may be zero).
214 	 * However we set state to SYN-SENT and, without releasing the socket
215 	 * lock, select a source port, enter ourselves into the hash tables and
216 	 * complete initialization after this.
217 	 */
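	/* inet_hash_connect() picks the ephemeral source port and hashes the
	 * socket; ip_route_newports() below then refreshes the route with the
	 * final 4-tuple.
	 */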
218 	tcp_set_state(sk, TCP_SYN_SENT);
219 	err = inet_hash_connect(&tcp_death_row, sk);
220 	if (err)
221 		goto failure;
222 
223 	sk_set_txhash(sk);
224 
225 	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
226 			       inet->inet_sport, inet->inet_dport, sk);
227 	if (IS_ERR(rt)) {
228 		err = PTR_ERR(rt);
229 		rt = NULL;
230 		goto failure;
231 	}
232 	/* OK, now commit destination to socket.  */
233 	sk->sk_gso_type = SKB_GSO_TCPV4;
234 	sk_setup_caps(sk, &rt->dst);
235 	rt = NULL;
236 
237 	if (!tp->write_seq && likely(!tp->repair))
238 		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
239 							   inet->inet_daddr,
240 							   inet->inet_sport,
241 							   usin->sin_port);
242 
243 	inet->inet_id = tp->write_seq ^ jiffies;
244 
245 	if (tcp_fastopen_defer_connect(sk, &err))
246 		return err;
247 	if (err)
248 		goto failure;
249 
250 	err = tcp_connect(sk);
251 
252 	if (err)
253 		goto failure;
254 
255 	return 0;
256 
257 failure:
258 	/*
259 	 * This unhashes the socket and releases the local port,
260 	 * if necessary.
261 	 */
262 	tcp_set_state(sk, TCP_CLOSE);
263 	ip_rt_put(rt);
264 	sk->sk_route_caps = 0;
265 	inet->inet_dport = 0;
266 	return err;
267 }
268 EXPORT_SYMBOL(tcp_v4_connect);
269 
270 /*
271  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
272  * It can be called through tcp_release_cb() if socket was owned by user
273  * at the time tcp_v4_err() was called to handle ICMP message.
274  */
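/* For example: if a router reports ICMP_FRAG_NEEDED with an MTU of 1400
 * while icsk_pmtu_cookie is still 1500, tcp_sync_mss() shrinks the MSS and
 * tcp_simple_retransmit() resends the queued segments immediately instead
 * of waiting for the retransmit timer.
 */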
275 void tcp_v4_mtu_reduced(struct sock *sk)
276 {
277 	struct inet_sock *inet = inet_sk(sk);
278 	struct dst_entry *dst;
279 	u32 mtu;
280 
281 	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
282 		return;
283 	mtu = tcp_sk(sk)->mtu_info;
284 	dst = inet_csk_update_pmtu(sk, mtu);
285 	if (!dst)
286 		return;
287 
288 	/* Something is about to go wrong... Remember the soft error
289 	 * in case this connection is not able to recover.
290 	 */
291 	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
292 		sk->sk_err_soft = EMSGSIZE;
293 
294 	mtu = dst_mtu(dst);
295 
296 	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
297 	    ip_sk_accept_pmtu(sk) &&
298 	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
299 		tcp_sync_mss(sk, mtu);
300 
301 		/* Resend the TCP packet because it's
302 		 * clear that the old packet has been
303 		 * dropped. This is the new "fast" path mtu
304 		 * discovery.
305 		 */
306 		tcp_simple_retransmit(sk);
307 	} /* else let the usual retransmit timer handle it */
308 }
309 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
310 
311 static void do_redirect(struct sk_buff *skb, struct sock *sk)
312 {
313 	struct dst_entry *dst = __sk_dst_check(sk, 0);
314 
315 	if (dst)
316 		dst->ops->redirect(dst, sk, skb);
317 }
318 
319 
320 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
321 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
322 {
323 	struct request_sock *req = inet_reqsk(sk);
324 	struct net *net = sock_net(sk);
325 
326 	/* ICMPs are not backlogged, hence we cannot get
327 	 * an established socket here.
328 	 */
329 	if (seq != tcp_rsk(req)->snt_isn) {
330 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
331 	} else if (abort) {
332 		/*
333 		 * Still in SYN_RECV, just remove it silently.
334 		 * There is no good way to pass the error to the newly
335 		 * created socket, and POSIX does not want network
336 		 * errors returned from accept().
337 		 */
338 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
339 		tcp_listendrop(req->rsk_listener);
340 	}
341 	reqsk_put(req);
342 }
343 EXPORT_SYMBOL(tcp_req_err);
344 
345 /*
346  * This routine is called by the ICMP module when it gets some
347  * sort of error condition.  If err < 0 then the socket should
348  * be closed and the error returned to the user.  If err > 0
349  * it's just the icmp type << 8 | icmp code.  After adjustment
350  * header points to the first 8 bytes of the tcp header.  We need
351  * to find the appropriate port.
352  *
353  * The locking strategy used here is very "optimistic". When
354  * someone else accesses the socket the ICMP is just dropped
355  * and for some paths there is no check at all.
356  * A more general error queue to queue errors for later handling
357  * is probably better.
358  *
359  */
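/* For example, a "destination unreachable / port unreachable" ICMP arrives
 * as type 3, code 3, i.e. (3 << 8) | 3; icmp_err_convert[] later maps the
 * code to ECONNREFUSED.
 */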
360 
361 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
362 {
363 	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
364 	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
365 	struct inet_connection_sock *icsk;
366 	struct tcp_sock *tp;
367 	struct inet_sock *inet;
368 	const int type = icmp_hdr(icmp_skb)->type;
369 	const int code = icmp_hdr(icmp_skb)->code;
370 	struct sock *sk;
371 	struct sk_buff *skb;
372 	struct request_sock *fastopen;
373 	__u32 seq, snd_una;
374 	__u32 remaining;
375 	int err;
376 	struct net *net = dev_net(icmp_skb->dev);
377 
378 	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
379 				       th->dest, iph->saddr, ntohs(th->source),
380 				       inet_iif(icmp_skb));
381 	if (!sk) {
382 		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
383 		return;
384 	}
385 	if (sk->sk_state == TCP_TIME_WAIT) {
386 		inet_twsk_put(inet_twsk(sk));
387 		return;
388 	}
389 	seq = ntohl(th->seq);
390 	if (sk->sk_state == TCP_NEW_SYN_RECV)
391 		return tcp_req_err(sk, seq,
392 				  type == ICMP_PARAMETERPROB ||
393 				  type == ICMP_TIME_EXCEEDED ||
394 				  (type == ICMP_DEST_UNREACH &&
395 				   (code == ICMP_NET_UNREACH ||
396 				    code == ICMP_HOST_UNREACH)));
397 
398 	bh_lock_sock(sk);
399 	/* If too many ICMPs get dropped on busy
400 	 * servers this needs to be solved differently.
401 	 * We do take care of PMTU discovery (RFC1191) special case :
402 	 * we can receive locally generated ICMP messages while socket is held.
403 	 */
404 	if (sock_owned_by_user(sk)) {
405 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
406 			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
407 	}
408 	if (sk->sk_state == TCP_CLOSE)
409 		goto out;
410 
411 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
412 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
413 		goto out;
414 	}
415 
416 	icsk = inet_csk(sk);
417 	tp = tcp_sk(sk);
418 	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
419 	fastopen = tp->fastopen_rsk;
420 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
421 	if (sk->sk_state != TCP_LISTEN &&
422 	    !between(seq, snd_una, tp->snd_nxt)) {
423 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
424 		goto out;
425 	}
426 
427 	switch (type) {
428 	case ICMP_REDIRECT:
429 		if (!sock_owned_by_user(sk))
430 			do_redirect(icmp_skb, sk);
431 		goto out;
432 	case ICMP_SOURCE_QUENCH:
433 		/* Just silently ignore these. */
434 		goto out;
435 	case ICMP_PARAMETERPROB:
436 		err = EPROTO;
437 		break;
438 	case ICMP_DEST_UNREACH:
439 		if (code > NR_ICMP_UNREACH)
440 			goto out;
441 
442 		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
443 			/* We are not interested in TCP_LISTEN and open_requests
444 			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
445 			 * they should go through unfragmented).
446 			 */
447 			if (sk->sk_state == TCP_LISTEN)
448 				goto out;
449 
450 			tp->mtu_info = info;
451 			if (!sock_owned_by_user(sk)) {
452 				tcp_v4_mtu_reduced(sk);
453 			} else {
454 				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
455 					sock_hold(sk);
456 			}
457 			goto out;
458 		}
459 
460 		err = icmp_err_convert[code].errno;
461 		/* check if icmp_skb allows revert of backoff
462 		 * (see draft-zimmermann-tcp-lcd) */
463 		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
464 			break;
465 		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
466 		    !icsk->icsk_backoff || fastopen)
467 			break;
468 
469 		if (sock_owned_by_user(sk))
470 			break;
471 
472 		icsk->icsk_backoff--;
473 		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
474 					       TCP_TIMEOUT_INIT;
475 		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
476 
477 		skb = tcp_write_queue_head(sk);
478 		BUG_ON(!skb);
479 
480 		remaining = icsk->icsk_rto -
481 			    min(icsk->icsk_rto,
482 				tcp_time_stamp - tcp_skb_timestamp(skb));
483 
484 		if (remaining) {
485 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
486 						  remaining, TCP_RTO_MAX);
487 		} else {
488 			/* RTO revert clocked out retransmission.
489 			 * Will retransmit now */
490 			tcp_retransmit_timer(sk);
491 		}
492 
493 		break;
494 	case ICMP_TIME_EXCEEDED:
495 		err = EHOSTUNREACH;
496 		break;
497 	default:
498 		goto out;
499 	}
500 
501 	switch (sk->sk_state) {
502 	case TCP_SYN_SENT:
503 	case TCP_SYN_RECV:
504 		/* Only in fast or simultaneous open. If a fast open socket
505 		 * is already accepted it is treated as a connected one below.
506 		 */
507 		if (fastopen && !fastopen->sk)
508 			break;
509 
510 		if (!sock_owned_by_user(sk)) {
511 			sk->sk_err = err;
512 
513 			sk->sk_error_report(sk);
514 
515 			tcp_done(sk);
516 		} else {
517 			sk->sk_err_soft = err;
518 		}
519 		goto out;
520 	}
521 
522 	/* If we've already connected we will keep trying
523 	 * until we time out, or the user gives up.
524 	 *
525 	 * rfc1122 4.2.3.9 allows to consider as hard errors
526 	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
527 	 * but it is obsoleted by pmtu discovery).
528 	 *
529 	 * Note that in the modern internet, where routing is unreliable
530 	 * and broken firewalls sit in every dark corner, sending random
531 	 * errors ordered by their masters, even these two messages finally lose
532 	 * their original sense (even Linux sends invalid PORT_UNREACHs)
533 	 *
534 	 * Now we are in compliance with RFCs.
535 	 *							--ANK (980905)
536 	 */
537 
538 	inet = inet_sk(sk);
539 	if (!sock_owned_by_user(sk) && inet->recverr) {
540 		sk->sk_err = err;
541 		sk->sk_error_report(sk);
542 	} else	{ /* Only an error on timeout */
543 		sk->sk_err_soft = err;
544 	}
545 
546 out:
547 	bh_unlock_sock(sk);
548 	sock_put(sk);
549 }
550 
551 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
552 {
553 	struct tcphdr *th = tcp_hdr(skb);
554 
555 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
556 		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
557 		skb->csum_start = skb_transport_header(skb) - skb->head;
558 		skb->csum_offset = offsetof(struct tcphdr, check);
559 	} else {
560 		th->check = tcp_v4_check(skb->len, saddr, daddr,
561 					 csum_partial(th,
562 						      th->doff << 2,
563 						      skb->csum));
564 	}
565 }
566 
567 /* This routine computes an IPv4 TCP checksum. */
568 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
569 {
570 	const struct inet_sock *inet = inet_sk(sk);
571 
572 	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
573 }
574 EXPORT_SYMBOL(tcp_v4_send_check);
575 
576 /*
577  *	This routine will send an RST to the other tcp.
578  *
579  *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
580  *		      for reset.
581  *	Answer: if a packet caused the RST, it is not for a socket
582  *		existing in our system; if it is matched to a socket,
583  *		it is just a duplicate segment or a bug in the other side's TCP.
584  *		So we build the reply based only on parameters
585  *		that arrived with the segment.
586  *	Exception: precedence violation. We do not implement it in any case.
587  */
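/* The reply is assembled on the stack: a bare TCP header plus, when an MD5
 * key matches the peer address, one aligned MD5 signature option (rep.opt).
 */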
588 
589 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
590 {
591 	const struct tcphdr *th = tcp_hdr(skb);
592 	struct {
593 		struct tcphdr th;
594 #ifdef CONFIG_TCP_MD5SIG
595 		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
596 #endif
597 	} rep;
598 	struct ip_reply_arg arg;
599 #ifdef CONFIG_TCP_MD5SIG
600 	struct tcp_md5sig_key *key = NULL;
601 	const __u8 *hash_location = NULL;
602 	unsigned char newhash[16];
603 	int genhash;
604 	struct sock *sk1 = NULL;
605 #endif
606 	struct net *net;
607 
608 	/* Never send a reset in response to a reset. */
609 	if (th->rst)
610 		return;
611 
612 	/* If sk is not NULL, it means we did a successful lookup and the incoming
613 	 * route had to be correct. Prequeue might have dropped our dst.
614 	 */
615 	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
616 		return;
617 
618 	/* Swap the send and the receive. */
619 	memset(&rep, 0, sizeof(rep));
620 	rep.th.dest   = th->source;
621 	rep.th.source = th->dest;
622 	rep.th.doff   = sizeof(struct tcphdr) / 4;
623 	rep.th.rst    = 1;
624 
625 	if (th->ack) {
626 		rep.th.seq = th->ack_seq;
627 	} else {
628 		rep.th.ack = 1;
629 		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
630 				       skb->len - (th->doff << 2));
631 	}
632 
633 	memset(&arg, 0, sizeof(arg));
634 	arg.iov[0].iov_base = (unsigned char *)&rep;
635 	arg.iov[0].iov_len  = sizeof(rep.th);
636 
637 	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
638 #ifdef CONFIG_TCP_MD5SIG
639 	rcu_read_lock();
640 	hash_location = tcp_parse_md5sig_option(th);
641 	if (sk && sk_fullsock(sk)) {
642 		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
643 					&ip_hdr(skb)->saddr, AF_INET);
644 	} else if (hash_location) {
645 		/*
646 		 * The active side is lost. Try to find the listening socket through
647 		 * the source port, and then find the md5 key through the listening socket.
648 		 * We do not lose security here:
649 		 * the incoming packet is checked with the md5 hash of the found key;
650 		 * no RST is generated if the md5 hash doesn't match.
651 		 */
652 		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
653 					     ip_hdr(skb)->saddr,
654 					     th->source, ip_hdr(skb)->daddr,
655 					     ntohs(th->source), inet_iif(skb));
656 		/* don't send rst if it can't find key */
657 		if (!sk1)
658 			goto out;
659 
660 		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
661 					&ip_hdr(skb)->saddr, AF_INET);
662 		if (!key)
663 			goto out;
664 
665 
666 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
667 		if (genhash || memcmp(hash_location, newhash, 16) != 0)
668 			goto out;
669 
670 	}
671 
672 	if (key) {
673 		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
674 				   (TCPOPT_NOP << 16) |
675 				   (TCPOPT_MD5SIG << 8) |
676 				   TCPOLEN_MD5SIG);
677 		/* Update length and the length the header thinks exists */
678 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
679 		rep.th.doff = arg.iov[0].iov_len / 4;
680 
681 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
682 				     key, ip_hdr(skb)->saddr,
683 				     ip_hdr(skb)->daddr, &rep.th);
684 	}
685 #endif
686 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
687 				      ip_hdr(skb)->saddr, /* XXX */
688 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
689 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
690 	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
691 
692 	/* When socket is gone, all binding information is lost.
693 	 * Routing might fail in this case. No choice here: if we choose to force
694 	 * input interface, we will misroute in case of asymmetric route.
695 	 */
696 	if (sk)
697 		arg.bound_dev_if = sk->sk_bound_dev_if;
698 
699 	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
700 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
701 
702 	arg.tos = ip_hdr(skb)->tos;
703 	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
704 	local_bh_disable();
705 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
706 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
707 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
708 			      &arg, arg.iov[0].iov_len);
709 
710 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
711 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
712 	local_bh_enable();
713 
714 #ifdef CONFIG_TCP_MD5SIG
715 out:
716 	rcu_read_unlock();
717 #endif
718 }
719 
720 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
721    outside socket context, is certainly ugly. What can I do?
722  */
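/* tcp_v4_send_ack() builds a bare ACK (optionally carrying timestamp and
 * MD5 options) on the stack and transmits it with ip_send_unicast_reply(),
 * mirroring tcp_v4_send_reset() above.
 */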
723 
724 static void tcp_v4_send_ack(const struct sock *sk,
725 			    struct sk_buff *skb, u32 seq, u32 ack,
726 			    u32 win, u32 tsval, u32 tsecr, int oif,
727 			    struct tcp_md5sig_key *key,
728 			    int reply_flags, u8 tos)
729 {
730 	const struct tcphdr *th = tcp_hdr(skb);
731 	struct {
732 		struct tcphdr th;
733 		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
734 #ifdef CONFIG_TCP_MD5SIG
735 			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
736 #endif
737 			];
738 	} rep;
739 	struct net *net = sock_net(sk);
740 	struct ip_reply_arg arg;
741 
742 	memset(&rep.th, 0, sizeof(struct tcphdr));
743 	memset(&arg, 0, sizeof(arg));
744 
745 	arg.iov[0].iov_base = (unsigned char *)&rep;
746 	arg.iov[0].iov_len  = sizeof(rep.th);
747 	if (tsecr) {
748 		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
749 				   (TCPOPT_TIMESTAMP << 8) |
750 				   TCPOLEN_TIMESTAMP);
751 		rep.opt[1] = htonl(tsval);
752 		rep.opt[2] = htonl(tsecr);
753 		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
754 	}
755 
756 	/* Swap the send and the receive. */
757 	rep.th.dest    = th->source;
758 	rep.th.source  = th->dest;
759 	rep.th.doff    = arg.iov[0].iov_len / 4;
760 	rep.th.seq     = htonl(seq);
761 	rep.th.ack_seq = htonl(ack);
762 	rep.th.ack     = 1;
763 	rep.th.window  = htons(win);
764 
765 #ifdef CONFIG_TCP_MD5SIG
766 	if (key) {
767 		int offset = (tsecr) ? 3 : 0;
768 
769 		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
770 					  (TCPOPT_NOP << 16) |
771 					  (TCPOPT_MD5SIG << 8) |
772 					  TCPOLEN_MD5SIG);
773 		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
774 		rep.th.doff = arg.iov[0].iov_len/4;
775 
776 		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
777 				    key, ip_hdr(skb)->saddr,
778 				    ip_hdr(skb)->daddr, &rep.th);
779 	}
780 #endif
781 	arg.flags = reply_flags;
782 	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
783 				      ip_hdr(skb)->saddr, /* XXX */
784 				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
785 	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
786 	if (oif)
787 		arg.bound_dev_if = oif;
788 	arg.tos = tos;
789 	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
790 	local_bh_disable();
791 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
792 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
793 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
794 			      &arg, arg.iov[0].iov_len);
795 
796 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
797 	local_bh_enable();
798 }
799 
800 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
801 {
802 	struct inet_timewait_sock *tw = inet_twsk(sk);
803 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
804 
805 	tcp_v4_send_ack(sk, skb,
806 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
807 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
808 			tcp_time_stamp + tcptw->tw_ts_offset,
809 			tcptw->tw_ts_recent,
810 			tw->tw_bound_dev_if,
811 			tcp_twsk_md5_key(tcptw),
812 			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
813 			tw->tw_tos
814 			);
815 
816 	inet_twsk_put(tw);
817 }
818 
819 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
820 				  struct request_sock *req)
821 {
822 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
823 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
824 	 */
825 	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
826 					     tcp_sk(sk)->snd_nxt;
827 
828 	/* RFC 7323 2.3
829 	 * The window field (SEG.WND) of every outgoing segment, with the
830 	 * exception of <SYN> segments, MUST be right-shifted by
831 	 * Rcv.Wind.Shift bits:
832 	 */
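	/* e.g. rsk_rcv_wnd == 0x20000 with rcv_wscale == 2 is advertised
	 * as a window field of 0x8000.
	 */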
833 	tcp_v4_send_ack(sk, skb, seq,
834 			tcp_rsk(req)->rcv_nxt,
835 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
836 			tcp_time_stamp,
837 			req->ts_recent,
838 			0,
839 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
840 					  AF_INET),
841 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
842 			ip_hdr(skb)->tos);
843 }
844 
845 /*
846  *	Send a SYN-ACK after having received a SYN.
847  *	This still operates on a request_sock only, not on a big
848  *	socket.
849  */
850 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
851 			      struct flowi *fl,
852 			      struct request_sock *req,
853 			      struct tcp_fastopen_cookie *foc,
854 			      enum tcp_synack_type synack_type)
855 {
856 	const struct inet_request_sock *ireq = inet_rsk(req);
857 	struct flowi4 fl4;
858 	int err = -1;
859 	struct sk_buff *skb;
860 
861 	/* First, grab a route. */
862 	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
863 		return -1;
864 
865 	skb = tcp_make_synack(sk, dst, req, foc, synack_type);
866 
867 	if (skb) {
868 		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
869 
870 		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
871 					    ireq->ir_rmt_addr,
872 					    ireq_opt_deref(ireq));
873 		err = net_xmit_eval(err);
874 	}
875 
876 	return err;
877 }
878 
879 /*
880  *	IPv4 request_sock destructor.
881  */
882 static void tcp_v4_reqsk_destructor(struct request_sock *req)
883 {
884 	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
885 }
886 
887 #ifdef CONFIG_TCP_MD5SIG
888 /*
889  * RFC2385 MD5 checksumming requires a mapping of
890  * IP address->MD5 Key.
891  * We need to maintain these in the sk structure.
892  */
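/* Illustrative userspace sketch (not part of this file): a key is installed
 * per peer address with the TCP_MD5SIG socket option, which is what
 * tcp_v4_parse_md5_keys() below decodes. fd is assumed to be an already
 * created TCP socket.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	peer->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */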
893 
894 /* Find the Key structure for an address.  */
895 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
896 					 const union tcp_md5_addr *addr,
897 					 int family)
898 {
899 	const struct tcp_sock *tp = tcp_sk(sk);
900 	struct tcp_md5sig_key *key;
901 	unsigned int size = sizeof(struct in_addr);
902 	const struct tcp_md5sig_info *md5sig;
903 
904 	/* caller either holds rcu_read_lock() or socket lock */
905 	md5sig = rcu_dereference_check(tp->md5sig_info,
906 				       lockdep_sock_is_held(sk));
907 	if (!md5sig)
908 		return NULL;
909 #if IS_ENABLED(CONFIG_IPV6)
910 	if (family == AF_INET6)
911 		size = sizeof(struct in6_addr);
912 #endif
913 	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
914 		if (key->family != family)
915 			continue;
916 		if (!memcmp(&key->addr, addr, size))
917 			return key;
918 	}
919 	return NULL;
920 }
921 EXPORT_SYMBOL(tcp_md5_do_lookup);
922 
923 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
924 					 const struct sock *addr_sk)
925 {
926 	const union tcp_md5_addr *addr;
927 
928 	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
929 	return tcp_md5_do_lookup(sk, addr, AF_INET);
930 }
931 EXPORT_SYMBOL(tcp_v4_md5_lookup);
932 
933 /* This can be called on a newly created socket, from other files */
934 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
935 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
936 {
937 	/* Add Key to the list */
938 	struct tcp_md5sig_key *key;
939 	struct tcp_sock *tp = tcp_sk(sk);
940 	struct tcp_md5sig_info *md5sig;
941 
942 	key = tcp_md5_do_lookup(sk, addr, family);
943 	if (key) {
944 		/* Pre-existing entry - just update that one. */
945 		memcpy(key->key, newkey, newkeylen);
946 		key->keylen = newkeylen;
947 		return 0;
948 	}
949 
950 	md5sig = rcu_dereference_protected(tp->md5sig_info,
951 					   lockdep_sock_is_held(sk));
952 	if (!md5sig) {
953 		md5sig = kmalloc(sizeof(*md5sig), gfp);
954 		if (!md5sig)
955 			return -ENOMEM;
956 
957 		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
958 		INIT_HLIST_HEAD(&md5sig->head);
959 		rcu_assign_pointer(tp->md5sig_info, md5sig);
960 	}
961 
962 	key = sock_kmalloc(sk, sizeof(*key), gfp);
963 	if (!key)
964 		return -ENOMEM;
965 	if (!tcp_alloc_md5sig_pool()) {
966 		sock_kfree_s(sk, key, sizeof(*key));
967 		return -ENOMEM;
968 	}
969 
970 	memcpy(key->key, newkey, newkeylen);
971 	key->keylen = newkeylen;
972 	key->family = family;
973 	memcpy(&key->addr, addr,
974 	       (family == AF_INET6) ? sizeof(struct in6_addr) :
975 				      sizeof(struct in_addr));
976 	hlist_add_head_rcu(&key->node, &md5sig->head);
977 	return 0;
978 }
979 EXPORT_SYMBOL(tcp_md5_do_add);
980 
981 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
982 {
983 	struct tcp_md5sig_key *key;
984 
985 	key = tcp_md5_do_lookup(sk, addr, family);
986 	if (!key)
987 		return -ENOENT;
988 	hlist_del_rcu(&key->node);
989 	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
990 	kfree_rcu(key, rcu);
991 	return 0;
992 }
993 EXPORT_SYMBOL(tcp_md5_do_del);
994 
995 static void tcp_clear_md5_list(struct sock *sk)
996 {
997 	struct tcp_sock *tp = tcp_sk(sk);
998 	struct tcp_md5sig_key *key;
999 	struct hlist_node *n;
1000 	struct tcp_md5sig_info *md5sig;
1001 
1002 	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1003 
1004 	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1005 		hlist_del_rcu(&key->node);
1006 		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1007 		kfree_rcu(key, rcu);
1008 	}
1009 }
1010 
1011 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1012 				 int optlen)
1013 {
1014 	struct tcp_md5sig cmd;
1015 	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1016 
1017 	if (optlen < sizeof(cmd))
1018 		return -EINVAL;
1019 
1020 	if (copy_from_user(&cmd, optval, sizeof(cmd)))
1021 		return -EFAULT;
1022 
1023 	if (sin->sin_family != AF_INET)
1024 		return -EINVAL;
1025 
1026 	if (!cmd.tcpm_keylen)
1027 		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1028 				      AF_INET);
1029 
1030 	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1031 		return -EINVAL;
1032 
1033 	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1034 			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1035 			      GFP_KERNEL);
1036 }
1037 
1038 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1039 				   __be32 daddr, __be32 saddr,
1040 				   const struct tcphdr *th, int nbytes)
1041 {
1042 	struct tcp4_pseudohdr *bp;
1043 	struct scatterlist sg;
1044 	struct tcphdr *_th;
1045 
1046 	bp = hp->scratch;
1047 	bp->saddr = saddr;
1048 	bp->daddr = daddr;
1049 	bp->pad = 0;
1050 	bp->protocol = IPPROTO_TCP;
1051 	bp->len = cpu_to_be16(nbytes);
1052 
1053 	_th = (struct tcphdr *)(bp + 1);
1054 	memcpy(_th, th, sizeof(*th));
1055 	_th->check = 0;
1056 
1057 	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1058 	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1059 				sizeof(*bp) + sizeof(*th));
1060 	return crypto_ahash_update(hp->md5_req);
1061 }
1062 
1063 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1064 			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
1065 {
1066 	struct tcp_md5sig_pool *hp;
1067 	struct ahash_request *req;
1068 
1069 	hp = tcp_get_md5sig_pool();
1070 	if (!hp)
1071 		goto clear_hash_noput;
1072 	req = hp->md5_req;
1073 
1074 	if (crypto_ahash_init(req))
1075 		goto clear_hash;
1076 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1077 		goto clear_hash;
1078 	if (tcp_md5_hash_key(hp, key))
1079 		goto clear_hash;
1080 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1081 	if (crypto_ahash_final(req))
1082 		goto clear_hash;
1083 
1084 	tcp_put_md5sig_pool();
1085 	return 0;
1086 
1087 clear_hash:
1088 	tcp_put_md5sig_pool();
1089 clear_hash_noput:
1090 	memset(md5_hash, 0, 16);
1091 	return 1;
1092 }
1093 
1094 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1095 			const struct sock *sk,
1096 			const struct sk_buff *skb)
1097 {
1098 	struct tcp_md5sig_pool *hp;
1099 	struct ahash_request *req;
1100 	const struct tcphdr *th = tcp_hdr(skb);
1101 	__be32 saddr, daddr;
1102 
1103 	if (sk) { /* valid for establish/request sockets */
1104 		saddr = sk->sk_rcv_saddr;
1105 		daddr = sk->sk_daddr;
1106 	} else {
1107 		const struct iphdr *iph = ip_hdr(skb);
1108 		saddr = iph->saddr;
1109 		daddr = iph->daddr;
1110 	}
1111 
1112 	hp = tcp_get_md5sig_pool();
1113 	if (!hp)
1114 		goto clear_hash_noput;
1115 	req = hp->md5_req;
1116 
1117 	if (crypto_ahash_init(req))
1118 		goto clear_hash;
1119 
1120 	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1121 		goto clear_hash;
1122 	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1123 		goto clear_hash;
1124 	if (tcp_md5_hash_key(hp, key))
1125 		goto clear_hash;
1126 	ahash_request_set_crypt(req, NULL, md5_hash, 0);
1127 	if (crypto_ahash_final(req))
1128 		goto clear_hash;
1129 
1130 	tcp_put_md5sig_pool();
1131 	return 0;
1132 
1133 clear_hash:
1134 	tcp_put_md5sig_pool();
1135 clear_hash_noput:
1136 	memset(md5_hash, 0, 16);
1137 	return 1;
1138 }
1139 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1140 
1141 #endif
1142 
1143 /* Called with rcu_read_lock() */
1144 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1145 				    const struct sk_buff *skb)
1146 {
1147 #ifdef CONFIG_TCP_MD5SIG
1148 	/*
1149 	 * This gets called for each TCP segment that arrives
1150 	 * so we want to be efficient.
1151 	 * We have 3 drop cases:
1152 	 * o No MD5 hash and one expected.
1153 	 * o MD5 hash and we're not expecting one.
1154 	 * o MD5 hash and it's wrong.
1155 	 */
1156 	const __u8 *hash_location = NULL;
1157 	struct tcp_md5sig_key *hash_expected;
1158 	const struct iphdr *iph = ip_hdr(skb);
1159 	const struct tcphdr *th = tcp_hdr(skb);
1160 	int genhash;
1161 	unsigned char newhash[16];
1162 
1163 	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1164 					  AF_INET);
1165 	hash_location = tcp_parse_md5sig_option(th);
1166 
1167 	/* We've parsed the options - do we have a hash? */
1168 	if (!hash_expected && !hash_location)
1169 		return false;
1170 
1171 	if (hash_expected && !hash_location) {
1172 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1173 		return true;
1174 	}
1175 
1176 	if (!hash_expected && hash_location) {
1177 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1178 		return true;
1179 	}
1180 
1181 	/* Okay, so this is hash_expected and hash_location -
1182 	 * so we need to calculate the checksum.
1183 	 */
1184 	genhash = tcp_v4_md5_hash_skb(newhash,
1185 				      hash_expected,
1186 				      NULL, skb);
1187 
1188 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1189 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1190 		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1191 				     &iph->saddr, ntohs(th->source),
1192 				     &iph->daddr, ntohs(th->dest),
1193 				     genhash ? " tcp_v4_calc_md5_hash failed"
1194 				     : "");
1195 		return true;
1196 	}
1197 	return false;
1198 #endif
1199 	return false;
1200 }
1201 
1202 static void tcp_v4_init_req(struct request_sock *req,
1203 			    const struct sock *sk_listener,
1204 			    struct sk_buff *skb)
1205 {
1206 	struct inet_request_sock *ireq = inet_rsk(req);
1207 
1208 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1209 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1210 	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
1211 }
1212 
1213 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1214 					  struct flowi *fl,
1215 					  const struct request_sock *req,
1216 					  bool *strict)
1217 {
1218 	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1219 
1220 	if (strict) {
1221 		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1222 			*strict = true;
1223 		else
1224 			*strict = false;
1225 	}
1226 
1227 	return dst;
1228 }
1229 
1230 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1231 	.family		=	PF_INET,
1232 	.obj_size	=	sizeof(struct tcp_request_sock),
1233 	.rtx_syn_ack	=	tcp_rtx_synack,
1234 	.send_ack	=	tcp_v4_reqsk_send_ack,
1235 	.destructor	=	tcp_v4_reqsk_destructor,
1236 	.send_reset	=	tcp_v4_send_reset,
1237 	.syn_ack_timeout =	tcp_syn_ack_timeout,
1238 };
1239 
1240 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1241 	.mss_clamp	=	TCP_MSS_DEFAULT,
1242 #ifdef CONFIG_TCP_MD5SIG
1243 	.req_md5_lookup	=	tcp_v4_md5_lookup,
1244 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1245 #endif
1246 	.init_req	=	tcp_v4_init_req,
1247 #ifdef CONFIG_SYN_COOKIES
1248 	.cookie_init_seq =	cookie_v4_init_sequence,
1249 #endif
1250 	.route_req	=	tcp_v4_route_req,
1251 	.init_seq	=	tcp_v4_init_sequence,
1252 	.send_synack	=	tcp_v4_send_synack,
1253 };
1254 
1255 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1256 {
1257 	/* Never answer SYNs sent to broadcast or multicast */
1258 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1259 		goto drop;
1260 
1261 	return tcp_conn_request(&tcp_request_sock_ops,
1262 				&tcp_request_sock_ipv4_ops, sk, skb);
1263 
1264 drop:
1265 	tcp_listendrop(sk);
1266 	return 0;
1267 }
1268 EXPORT_SYMBOL(tcp_v4_conn_request);
1269 
1270 
1271 /*
1272  * The three way handshake has completed - we got a valid synack -
1273  * now create the new socket.
1274  */
1275 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1276 				  struct request_sock *req,
1277 				  struct dst_entry *dst,
1278 				  struct request_sock *req_unhash,
1279 				  bool *own_req)
1280 {
1281 	struct inet_request_sock *ireq;
1282 	struct inet_sock *newinet;
1283 	struct tcp_sock *newtp;
1284 	struct sock *newsk;
1285 #ifdef CONFIG_TCP_MD5SIG
1286 	struct tcp_md5sig_key *key;
1287 #endif
1288 	struct ip_options_rcu *inet_opt;
1289 
1290 	if (sk_acceptq_is_full(sk))
1291 		goto exit_overflow;
1292 
1293 	newsk = tcp_create_openreq_child(sk, req, skb);
1294 	if (!newsk)
1295 		goto exit_nonewsk;
1296 
1297 	newsk->sk_gso_type = SKB_GSO_TCPV4;
1298 	inet_sk_rx_dst_set(newsk, skb);
1299 
1300 	newtp		      = tcp_sk(newsk);
1301 	newinet		      = inet_sk(newsk);
1302 	ireq		      = inet_rsk(req);
1303 	sk_daddr_set(newsk, ireq->ir_rmt_addr);
1304 	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1305 	newsk->sk_bound_dev_if = ireq->ir_iif;
1306 	newinet->inet_saddr   = ireq->ir_loc_addr;
1307 	inet_opt	      = rcu_dereference(ireq->ireq_opt);
1308 	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
1309 	newinet->mc_index     = inet_iif(skb);
1310 	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
1311 	newinet->rcv_tos      = ip_hdr(skb)->tos;
1312 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
1313 	if (inet_opt)
1314 		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1315 	newinet->inet_id = newtp->write_seq ^ jiffies;
1316 
1317 	if (!dst) {
1318 		dst = inet_csk_route_child_sock(sk, newsk, req);
1319 		if (!dst)
1320 			goto put_and_exit;
1321 	} else {
1322 		/* syncookie case : see end of cookie_v4_check() */
1323 	}
1324 	sk_setup_caps(newsk, dst);
1325 
1326 	tcp_ca_openreq_child(newsk, dst);
1327 
1328 	tcp_sync_mss(newsk, dst_mtu(dst));
1329 	newtp->advmss = dst_metric_advmss(dst);
1330 	if (tcp_sk(sk)->rx_opt.user_mss &&
1331 	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1332 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1333 
1334 	tcp_initialize_rcv_mss(newsk);
1335 
1336 #ifdef CONFIG_TCP_MD5SIG
1337 	/* Copy over the MD5 key from the original socket */
1338 	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1339 				AF_INET);
1340 	if (key) {
1341 		/*
1342 		 * We're using one, so create a matching key
1343 		 * on the newsk structure. If we fail to get
1344 		 * memory, then we end up not copying the key
1345 		 * across. Shucks.
1346 		 */
1347 		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1348 			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
1349 		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1350 	}
1351 #endif
1352 
1353 	if (__inet_inherit_port(sk, newsk) < 0)
1354 		goto put_and_exit;
1355 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1356 	if (likely(*own_req)) {
1357 		tcp_move_syn(newtp, req);
1358 		ireq->ireq_opt = NULL;
1359 	} else {
1360 		newinet->inet_opt = NULL;
1361 	}
1362 	return newsk;
1363 
1364 exit_overflow:
1365 	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1366 exit_nonewsk:
1367 	dst_release(dst);
1368 exit:
1369 	tcp_listendrop(sk);
1370 	return NULL;
1371 put_and_exit:
1372 	newinet->inet_opt = NULL;
1373 	inet_csk_prepare_forced_close(newsk);
1374 	tcp_done(newsk);
1375 	goto exit;
1376 }
1377 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1378 
1379 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1380 {
1381 #ifdef CONFIG_SYN_COOKIES
1382 	const struct tcphdr *th = tcp_hdr(skb);
1383 
1384 	if (!th->syn)
1385 		sk = cookie_v4_check(sk, skb);
1386 #endif
1387 	return sk;
1388 }
1389 
1390 /* The socket must have its spinlock held when we get
1391  * here, unless it is a TCP_LISTEN socket.
1392  *
1393  * We have a potential double-lock case here, so even when
1394  * doing backlog processing we use the BH locking scheme.
1395  * This is because we cannot sleep with the original spinlock
1396  * held.
1397  */
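/* Fast path: an ESTABLISHED socket hands the segment straight to
 * tcp_rcv_established(); everything else falls through to the generic
 * state machine in tcp_rcv_state_process().
 */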
1398 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1399 {
1400 	struct sock *rsk;
1401 
1402 	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1403 		struct dst_entry *dst = sk->sk_rx_dst;
1404 
1405 		sock_rps_save_rxhash(sk, skb);
1406 		sk_mark_napi_id(sk, skb);
1407 		if (dst) {
1408 			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1409 			    !dst->ops->check(dst, 0)) {
1410 				dst_release(dst);
1411 				sk->sk_rx_dst = NULL;
1412 			}
1413 		}
1414 		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1415 		return 0;
1416 	}
1417 
1418 	if (tcp_checksum_complete(skb))
1419 		goto csum_err;
1420 
1421 	if (sk->sk_state == TCP_LISTEN) {
1422 		struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1423 
1424 		if (!nsk)
1425 			goto discard;
1426 		if (nsk != sk) {
1427 			sock_rps_save_rxhash(nsk, skb);
1428 			sk_mark_napi_id(nsk, skb);
1429 			if (tcp_child_process(sk, nsk, skb)) {
1430 				rsk = nsk;
1431 				goto reset;
1432 			}
1433 			return 0;
1434 		}
1435 	} else
1436 		sock_rps_save_rxhash(sk, skb);
1437 
1438 	if (tcp_rcv_state_process(sk, skb)) {
1439 		rsk = sk;
1440 		goto reset;
1441 	}
1442 	return 0;
1443 
1444 reset:
1445 	tcp_v4_send_reset(rsk, skb);
1446 discard:
1447 	kfree_skb(skb);
1448 	/* Be careful here. If this function gets more complicated and
1449 	 * gcc suffers from register pressure on the x86, sk (in %ebx)
1450 	 * might be destroyed here. This current version compiles correctly,
1451 	 * but you have been warned.
1452 	 */
1453 	return 0;
1454 
1455 csum_err:
1456 	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1457 	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1458 	goto discard;
1459 }
1460 EXPORT_SYMBOL(tcp_v4_do_rcv);
1461 
1462 void tcp_v4_early_demux(struct sk_buff *skb)
1463 {
1464 	const struct iphdr *iph;
1465 	const struct tcphdr *th;
1466 	struct sock *sk;
1467 
1468 	if (skb->pkt_type != PACKET_HOST)
1469 		return;
1470 
1471 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1472 		return;
1473 
1474 	iph = ip_hdr(skb);
1475 	th = tcp_hdr(skb);
1476 
1477 	if (th->doff < sizeof(struct tcphdr) / 4)
1478 		return;
1479 
1480 	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1481 				       iph->saddr, th->source,
1482 				       iph->daddr, ntohs(th->dest),
1483 				       skb->skb_iif);
1484 	if (sk) {
1485 		skb->sk = sk;
1486 		skb->destructor = sock_edemux;
1487 		if (sk_fullsock(sk)) {
1488 			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1489 
1490 			if (dst)
1491 				dst = dst_check(dst, 0);
1492 			if (dst &&
1493 			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1494 				skb_dst_set_noref(skb, dst);
1495 		}
1496 	}
1497 }
1498 
1499 /* Packet is added to VJ-style prequeue for processing in process
1500  * context, if a reader task is waiting. Apparently, this exciting
1501  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1502  * failed somewhere. Latency? Burstiness? Well, at least now we will
1503  * see, why it failed. 8)8)				  --ANK
1504  *
1505  */
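/* Returns true when the skb has been queued on the prequeue (the caller
 * must then skip tcp_v4_do_rcv()); false means the segment should be
 * processed directly.
 */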
1506 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1507 {
1508 	struct tcp_sock *tp = tcp_sk(sk);
1509 
1510 	if (sysctl_tcp_low_latency || !tp->ucopy.task)
1511 		return false;
1512 
1513 	if (skb->len <= tcp_hdrlen(skb) &&
1514 	    skb_queue_len(&tp->ucopy.prequeue) == 0)
1515 		return false;
1516 
1517 	/* Before escaping RCU protected region, we need to take care of skb
1518 	 * dst. Prequeue is only enabled for established sockets.
1519 	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
1520 	 * Instead of doing full sk_rx_dst validity here, let's perform
1521 	 * an optimistic check.
1522 	 */
1523 	if (likely(sk->sk_rx_dst))
1524 		skb_dst_drop(skb);
1525 	else
1526 		skb_dst_force_safe(skb);
1527 
1528 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
1529 	tp->ucopy.memory += skb->truesize;
1530 	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
1531 	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
1532 		struct sk_buff *skb1;
1533 
1534 		BUG_ON(sock_owned_by_user(sk));
1535 		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
1536 				skb_queue_len(&tp->ucopy.prequeue));
1537 
1538 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1539 			sk_backlog_rcv(sk, skb1);
1540 
1541 		tp->ucopy.memory = 0;
1542 	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1543 		wake_up_interruptible_sync_poll(sk_sleep(sk),
1544 					   POLLIN | POLLRDNORM | POLLRDBAND);
1545 		if (!inet_csk_ack_scheduled(sk))
1546 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1547 						  (3 * tcp_rto_min(sk)) / 4,
1548 						  TCP_RTO_MAX);
1549 	}
1550 	return true;
1551 }
1552 EXPORT_SYMBOL(tcp_prequeue);
1553 
1554 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1555 {
1556 	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1557 
1558 	/* Only socket owner can try to collapse/prune rx queues
1559 	 * to reduce memory overhead, so add a little headroom here.
1560 	 * Only a few socket backlogs are likely to be non-empty at the same time.
1561 	 */
1562 	limit += 64*1024;
1563 
1564 	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1565 	 * we can fix skb->truesize to its real value to avoid future drops.
1566 	 * This is valid because skb is not yet charged to the socket.
1567 	 * It has been noticed that pure SACK packets were sometimes dropped
1568 	 * (if cooked by drivers without copybreak feature).
1569 	 */
1570 	if (!skb->data_len)
1571 		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
1572 
1573 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
1574 		bh_unlock_sock(sk);
1575 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1576 		return true;
1577 	}
1578 	return false;
1579 }
1580 EXPORT_SYMBOL(tcp_add_backlog);
1581 
1582 int tcp_filter(struct sock *sk, struct sk_buff *skb)
1583 {
1584 	struct tcphdr *th = (struct tcphdr *)skb->data;
1585 	unsigned int eaten = skb->len;
1586 	int err;
1587 
1588 	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1589 	if (!err) {
1590 		eaten -= skb->len;
1591 		TCP_SKB_CB(skb)->end_seq -= eaten;
1592 	}
1593 	return err;
1594 }
1595 EXPORT_SYMBOL(tcp_filter);
1596 
1597 /*
1598  *	From tcp_input.c
1599  */
1600 
1601 int tcp_v4_rcv(struct sk_buff *skb)
1602 {
1603 	struct net *net = dev_net(skb->dev);
1604 	const struct iphdr *iph;
1605 	const struct tcphdr *th;
1606 	bool refcounted;
1607 	struct sock *sk;
1608 	int ret;
1609 
1610 	if (skb->pkt_type != PACKET_HOST)
1611 		goto discard_it;
1612 
1613 	/* Count it even if it's bad */
1614 	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
1615 
1616 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1617 		goto discard_it;
1618 
1619 	th = (const struct tcphdr *)skb->data;
1620 
1621 	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1622 		goto bad_packet;
1623 	if (!pskb_may_pull(skb, th->doff * 4))
1624 		goto discard_it;
1625 
1626 	/* An explanation is required here, I think.
1627 	 * Packet length and doff are validated by header prediction,
1628 	 * provided case of th->doff==0 is eliminated.
1629 	 * So, we defer the checks. */
1630 
1631 	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1632 		goto csum_error;
1633 
1634 	th = (const struct tcphdr *)skb->data;
1635 	iph = ip_hdr(skb);
1636 	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
1637 	 * barrier() makes sure compiler wont play fool^Waliasing games.
1638 	 */
1639 	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1640 		sizeof(struct inet_skb_parm));
1641 	barrier();
1642 
1643 	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1644 	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1645 				    skb->len - th->doff * 4);
1646 	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1647 	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1648 	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1649 	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1650 	TCP_SKB_CB(skb)->sacked	 = 0;
1651 
1652 lookup:
1653 	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1654 			       th->dest, &refcounted);
1655 	if (!sk)
1656 		goto no_tcp_socket;
1657 
1658 process:
1659 	if (sk->sk_state == TCP_TIME_WAIT)
1660 		goto do_time_wait;
1661 
1662 	if (sk->sk_state == TCP_NEW_SYN_RECV) {
1663 		struct request_sock *req = inet_reqsk(sk);
1664 		struct sock *nsk;
1665 
1666 		sk = req->rsk_listener;
1667 		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1668 			sk_drops_add(sk, skb);
1669 			reqsk_put(req);
1670 			goto discard_it;
1671 		}
1672 		if (unlikely(sk->sk_state != TCP_LISTEN)) {
1673 			inet_csk_reqsk_queue_drop_and_put(sk, req);
1674 			goto lookup;
1675 		}
1676 		/* We own a reference on the listener, increase it again
1677 		 * as we might lose it too soon.
1678 		 */
1679 		sock_hold(sk);
1680 		refcounted = true;
1681 		nsk = NULL;
1682 		if (!tcp_filter(sk, skb))
1683 			nsk = tcp_check_req(sk, skb, req, false);
1684 		if (!nsk) {
1685 			reqsk_put(req);
1686 			goto discard_and_relse;
1687 		}
1688 		if (nsk == sk) {
1689 			reqsk_put(req);
1690 		} else if (tcp_child_process(sk, nsk, skb)) {
1691 			tcp_v4_send_reset(nsk, skb);
1692 			goto discard_and_relse;
1693 		} else {
1694 			sock_put(sk);
1695 			return 0;
1696 		}
1697 	}
1698 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1699 		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1700 		goto discard_and_relse;
1701 	}
1702 
1703 	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1704 		goto discard_and_relse;
1705 
1706 	if (tcp_v4_inbound_md5_hash(sk, skb))
1707 		goto discard_and_relse;
1708 
1709 	nf_reset(skb);
1710 
1711 	if (tcp_filter(sk, skb))
1712 		goto discard_and_relse;
1713 	th = (const struct tcphdr *)skb->data;
1714 	iph = ip_hdr(skb);
1715 
1716 	skb->dev = NULL;
1717 
1718 	if (sk->sk_state == TCP_LISTEN) {
1719 		ret = tcp_v4_do_rcv(sk, skb);
1720 		goto put_and_return;
1721 	}
1722 
1723 	sk_incoming_cpu_update(sk);
1724 
1725 	bh_lock_sock_nested(sk);
1726 	tcp_segs_in(tcp_sk(sk), skb);
1727 	ret = 0;
1728 	if (!sock_owned_by_user(sk)) {
1729 		if (!tcp_prequeue(sk, skb))
1730 			ret = tcp_v4_do_rcv(sk, skb);
1731 	} else if (tcp_add_backlog(sk, skb)) {
1732 		goto discard_and_relse;
1733 	}
1734 	bh_unlock_sock(sk);
1735 
1736 put_and_return:
1737 	if (refcounted)
1738 		sock_put(sk);
1739 
1740 	return ret;
1741 
1742 no_tcp_socket:
1743 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1744 		goto discard_it;
1745 
1746 	if (tcp_checksum_complete(skb)) {
1747 csum_error:
1748 		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1749 bad_packet:
1750 		__TCP_INC_STATS(net, TCP_MIB_INERRS);
1751 	} else {
1752 		tcp_v4_send_reset(NULL, skb);
1753 	}
1754 
1755 discard_it:
1756 	/* Discard frame. */
1757 	kfree_skb(skb);
1758 	return 0;
1759 
1760 discard_and_relse:
1761 	sk_drops_add(sk, skb);
1762 	if (refcounted)
1763 		sock_put(sk);
1764 	goto discard_it;
1765 
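/* Editorial note: the do_time_wait label handles segments that matched a
 * TIME_WAIT socket.  tcp_timewait_state_process() decides whether the segment
 * is a valid new SYN reusing the 4-tuple (TCP_TW_SYN, retried against a
 * listener), should be answered with the last ACK (TCP_TW_ACK), deserves a
 * reset (TCP_TW_RST), or can simply be dropped (TCP_TW_SUCCESS).
 */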
1766 do_time_wait:
1767 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1768 		inet_twsk_put(inet_twsk(sk));
1769 		goto discard_it;
1770 	}
1771 
1772 	if (tcp_checksum_complete(skb)) {
1773 		inet_twsk_put(inet_twsk(sk));
1774 		goto csum_error;
1775 	}
1776 	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1777 	case TCP_TW_SYN: {
1778 		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1779 							&tcp_hashinfo, skb,
1780 							__tcp_hdrlen(th),
1781 							iph->saddr, th->source,
1782 							iph->daddr, th->dest,
1783 							inet_iif(skb));
1784 		if (sk2) {
1785 			inet_twsk_deschedule_put(inet_twsk(sk));
1786 			sk = sk2;
1787 			refcounted = false;
1788 			goto process;
1789 		}
1790 		/* Fall through to ACK */
1791 	}
1792 	case TCP_TW_ACK:
1793 		tcp_v4_timewait_ack(sk, skb);
1794 		break;
1795 	case TCP_TW_RST:
1796 		tcp_v4_send_reset(sk, skb);
1797 		inet_twsk_deschedule_put(inet_twsk(sk));
1798 		goto discard_it;
1799 	case TCP_TW_SUCCESS:;
1800 	}
1801 	goto discard_it;
1802 }
1803 
1804 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1805 	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
1806 	.twsk_unique	= tcp_twsk_unique,
1807 	.twsk_destructor= tcp_twsk_destructor,
1808 };
1809 
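/* Editorial note: dst_hold_safe() only succeeds when the dst's refcount is
 * not already zero, so a route entry that is in the middle of being released
 * is never cached in sk->sk_rx_dst.
 */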
1810 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1811 {
1812 	struct dst_entry *dst = skb_dst(skb);
1813 
1814 	if (dst && dst_hold_safe(dst)) {
1815 		sk->sk_rx_dst = dst;
1816 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1817 	}
1818 }
1819 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1820 
1821 const struct inet_connection_sock_af_ops ipv4_specific = {
1822 	.queue_xmit	   = ip_queue_xmit,
1823 	.send_check	   = tcp_v4_send_check,
1824 	.rebuild_header	   = inet_sk_rebuild_header,
1825 	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
1826 	.conn_request	   = tcp_v4_conn_request,
1827 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
1828 	.net_header_len	   = sizeof(struct iphdr),
1829 	.setsockopt	   = ip_setsockopt,
1830 	.getsockopt	   = ip_getsockopt,
1831 	.addr2sockaddr	   = inet_csk_addr2sockaddr,
1832 	.sockaddr_len	   = sizeof(struct sockaddr_in),
1833 	.bind_conflict	   = inet_csk_bind_conflict,
1834 #ifdef CONFIG_COMPAT
1835 	.compat_setsockopt = compat_ip_setsockopt,
1836 	.compat_getsockopt = compat_ip_getsockopt,
1837 #endif
1838 	.mtu_reduced	   = tcp_v4_mtu_reduced,
1839 };
1840 EXPORT_SYMBOL(ipv4_specific);
1841 
1842 #ifdef CONFIG_TCP_MD5SIG
1843 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1844 	.md5_lookup		= tcp_v4_md5_lookup,
1845 	.calc_md5_hash		= tcp_v4_md5_hash_skb,
1846 	.md5_parse		= tcp_v4_parse_md5_keys,
1847 };
1848 #endif
1849 
1850 /* NOTE: A lot of things are set to zero explicitly by the call to
1851  *       sk_alloc(), so they need not be done here.
1852  */
1853 static int tcp_v4_init_sock(struct sock *sk)
1854 {
1855 	struct inet_connection_sock *icsk = inet_csk(sk);
1856 
1857 	tcp_init_sock(sk);
1858 
1859 	icsk->icsk_af_ops = &ipv4_specific;
1860 
1861 #ifdef CONFIG_TCP_MD5SIG
1862 	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1863 #endif
1864 
1865 	return 0;
1866 }
1867 
1868 void tcp_v4_destroy_sock(struct sock *sk)
1869 {
1870 	struct tcp_sock *tp = tcp_sk(sk);
1871 
1872 	tcp_clear_xmit_timers(sk);
1873 
1874 	tcp_cleanup_congestion_control(sk);
1875 
1876 	/* Clean up the write buffer. */
1877 	tcp_write_queue_purge(sk);
1878 
1879 	/* Clean up our, hopefully empty, out_of_order_queue. */
1880 	skb_rbtree_purge(&tp->out_of_order_queue);
1881 
1882 #ifdef CONFIG_TCP_MD5SIG
1883 	/* Clean up the MD5 key list, if any */
1884 	if (tp->md5sig_info) {
1885 		tcp_clear_md5_list(sk);
1886 		kfree_rcu(tp->md5sig_info, rcu);
1887 		tp->md5sig_info = NULL;
1888 	}
1889 #endif
1890 
1891 	/* Clean up the prequeue; it really should be empty. */
1892 	__skb_queue_purge(&tp->ucopy.prequeue);
1893 
1894 	/* Clean up a referenced TCP bind bucket. */
1895 	if (inet_csk(sk)->icsk_bind_hash)
1896 		inet_put_port(sk);
1897 
1898 	BUG_ON(tp->fastopen_rsk);
1899 
1900 	/* If socket is aborted during connect operation */
1901 	tcp_free_fastopen_req(tp);
1902 	tcp_saved_syn_free(tp);
1903 
1904 	local_bh_disable();
1905 	sk_sockets_allocated_dec(sk);
1906 	local_bh_enable();
1907 }
1908 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1909 
1910 #ifdef CONFIG_PROC_FS
1911 /* Proc filesystem TCP sock list dumping. */
1912 
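/* Editorial note on the iterator state used below: st->bucket is the current
 * hash bucket (the listening hash is walked first, then the established
 * hash), st->num counts sockets visited so far, and st->offset is the
 * position within the current bucket, so a partially read /proc dump can be
 * resumed where it left off.
 */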
1913 /*
1914  * Get the next listener socket following cur.  If cur is NULL, get the
1915  * first socket starting from the bucket given in st->bucket; when
1916  * st->bucket is zero, the very first socket in the hash table is returned.
1917  */
1918 static void *listening_get_next(struct seq_file *seq, void *cur)
1919 {
1920 	struct tcp_iter_state *st = seq->private;
1921 	struct net *net = seq_file_net(seq);
1922 	struct inet_listen_hashbucket *ilb;
1923 	struct sock *sk = cur;
1924 
1925 	if (!sk) {
1926 get_head:
1927 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
1928 		spin_lock_bh(&ilb->lock);
1929 		sk = sk_head(&ilb->head);
1930 		st->offset = 0;
1931 		goto get_sk;
1932 	}
1933 	ilb = &tcp_hashinfo.listening_hash[st->bucket];
1934 	++st->num;
1935 	++st->offset;
1936 
1937 	sk = sk_next(sk);
1938 get_sk:
1939 	sk_for_each_from(sk) {
1940 		if (!net_eq(sock_net(sk), net))
1941 			continue;
1942 		if (sk->sk_family == st->family)
1943 			return sk;
1944 	}
1945 	spin_unlock_bh(&ilb->lock);
1946 	st->offset = 0;
1947 	if (++st->bucket < INET_LHTABLE_SIZE)
1948 		goto get_head;
1949 	return NULL;
1950 }
1951 
1952 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1953 {
1954 	struct tcp_iter_state *st = seq->private;
1955 	void *rc;
1956 
1957 	st->bucket = 0;
1958 	st->offset = 0;
1959 	rc = listening_get_next(seq, NULL);
1960 
1961 	while (rc && *pos) {
1962 		rc = listening_get_next(seq, rc);
1963 		--*pos;
1964 	}
1965 	return rc;
1966 }
1967 
1968 static inline bool empty_bucket(const struct tcp_iter_state *st)
1969 {
1970 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1971 }
1972 
1973 /*
1974  * Get the first established socket, starting from the bucket given in st->bucket.
1975  * If st->bucket is zero, the very first socket in the hash is returned.
1976  */
1977 static void *established_get_first(struct seq_file *seq)
1978 {
1979 	struct tcp_iter_state *st = seq->private;
1980 	struct net *net = seq_file_net(seq);
1981 	void *rc = NULL;
1982 
1983 	st->offset = 0;
1984 	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1985 		struct sock *sk;
1986 		struct hlist_nulls_node *node;
1987 		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1988 
1989 		/* Lockless fast path for the common case of empty buckets */
1990 		if (empty_bucket(st))
1991 			continue;
1992 
1993 		spin_lock_bh(lock);
1994 		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1995 			if (sk->sk_family != st->family ||
1996 			    !net_eq(sock_net(sk), net)) {
1997 				continue;
1998 			}
1999 			rc = sk;
2000 			goto out;
2001 		}
2002 		spin_unlock_bh(lock);
2003 	}
2004 out:
2005 	return rc;
2006 }
2007 
2008 static void *established_get_next(struct seq_file *seq, void *cur)
2009 {
2010 	struct sock *sk = cur;
2011 	struct hlist_nulls_node *node;
2012 	struct tcp_iter_state *st = seq->private;
2013 	struct net *net = seq_file_net(seq);
2014 
2015 	++st->num;
2016 	++st->offset;
2017 
2018 	sk = sk_nulls_next(sk);
2019 
2020 	sk_nulls_for_each_from(sk, node) {
2021 		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2022 			return sk;
2023 	}
2024 
2025 	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2026 	++st->bucket;
2027 	return established_get_first(seq);
2028 }
2029 
2030 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2031 {
2032 	struct tcp_iter_state *st = seq->private;
2033 	void *rc;
2034 
2035 	st->bucket = 0;
2036 	rc = established_get_first(seq);
2037 
2038 	while (rc && pos) {
2039 		rc = established_get_next(seq, rc);
2040 		--pos;
2041 	}
2042 	return rc;
2043 }
2044 
2045 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2046 {
2047 	void *rc;
2048 	struct tcp_iter_state *st = seq->private;
2049 
2050 	st->state = TCP_SEQ_STATE_LISTENING;
2051 	rc	  = listening_get_idx(seq, &pos);
2052 
2053 	if (!rc) {
2054 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2055 		rc	  = established_get_idx(seq, pos);
2056 	}
2057 
2058 	return rc;
2059 }
2060 
2061 static void *tcp_seek_last_pos(struct seq_file *seq)
2062 {
2063 	struct tcp_iter_state *st = seq->private;
2064 	int offset = st->offset;
2065 	int orig_num = st->num;
2066 	void *rc = NULL;
2067 
2068 	switch (st->state) {
2069 	case TCP_SEQ_STATE_LISTENING:
2070 		if (st->bucket >= INET_LHTABLE_SIZE)
2071 			break;
2072 		st->state = TCP_SEQ_STATE_LISTENING;
2073 		rc = listening_get_next(seq, NULL);
2074 		while (offset-- && rc)
2075 			rc = listening_get_next(seq, rc);
2076 		if (rc)
2077 			break;
2078 		st->bucket = 0;
2079 		st->state = TCP_SEQ_STATE_ESTABLISHED;
2080 		/* Fallthrough */
2081 	case TCP_SEQ_STATE_ESTABLISHED:
2082 		if (st->bucket > tcp_hashinfo.ehash_mask)
2083 			break;
2084 		rc = established_get_first(seq);
2085 		while (offset-- && rc)
2086 			rc = established_get_next(seq, rc);
2087 	}
2088 
2089 	st->num = orig_num;
2090 
2091 	return rc;
2092 }
2093 
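/* Editorial note: tcp_seq_start() below remembers the last position handed
 * out in st->last_pos; when the next read resumes at the same *pos,
 * tcp_seek_last_pos() restarts from the saved bucket/offset instead of
 * rescanning both hash tables from the beginning.
 */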
2094 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2095 {
2096 	struct tcp_iter_state *st = seq->private;
2097 	void *rc;
2098 
2099 	if (*pos && *pos == st->last_pos) {
2100 		rc = tcp_seek_last_pos(seq);
2101 		if (rc)
2102 			goto out;
2103 	}
2104 
2105 	st->state = TCP_SEQ_STATE_LISTENING;
2106 	st->num = 0;
2107 	st->bucket = 0;
2108 	st->offset = 0;
2109 	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2110 
2111 out:
2112 	st->last_pos = *pos;
2113 	return rc;
2114 }
2115 
2116 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2117 {
2118 	struct tcp_iter_state *st = seq->private;
2119 	void *rc = NULL;
2120 
2121 	if (v == SEQ_START_TOKEN) {
2122 		rc = tcp_get_idx(seq, 0);
2123 		goto out;
2124 	}
2125 
2126 	switch (st->state) {
2127 	case TCP_SEQ_STATE_LISTENING:
2128 		rc = listening_get_next(seq, v);
2129 		if (!rc) {
2130 			st->state = TCP_SEQ_STATE_ESTABLISHED;
2131 			st->bucket = 0;
2132 			st->offset = 0;
2133 			rc	  = established_get_first(seq);
2134 		}
2135 		break;
2136 	case TCP_SEQ_STATE_ESTABLISHED:
2137 		rc = established_get_next(seq, v);
2138 		break;
2139 	}
2140 out:
2141 	++*pos;
2142 	st->last_pos = *pos;
2143 	return rc;
2144 }
2145 
2146 static void tcp_seq_stop(struct seq_file *seq, void *v)
2147 {
2148 	struct tcp_iter_state *st = seq->private;
2149 
2150 	switch (st->state) {
2151 	case TCP_SEQ_STATE_LISTENING:
2152 		if (v != SEQ_START_TOKEN)
2153 			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2154 		break;
2155 	case TCP_SEQ_STATE_ESTABLISHED:
2156 		if (v)
2157 			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2158 		break;
2159 	}
2160 }
2161 
2162 int tcp_seq_open(struct inode *inode, struct file *file)
2163 {
2164 	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2165 	struct tcp_iter_state *s;
2166 	int err;
2167 
2168 	err = seq_open_net(inode, file, &afinfo->seq_ops,
2169 			  sizeof(struct tcp_iter_state));
2170 	if (err < 0)
2171 		return err;
2172 
2173 	s = ((struct seq_file *)file->private_data)->private;
2174 	s->family		= afinfo->family;
2175 	s->last_pos		= 0;
2176 	return 0;
2177 }
2178 EXPORT_SYMBOL(tcp_seq_open);
2179 
2180 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2181 {
2182 	int rc = 0;
2183 	struct proc_dir_entry *p;
2184 
2185 	afinfo->seq_ops.start		= tcp_seq_start;
2186 	afinfo->seq_ops.next		= tcp_seq_next;
2187 	afinfo->seq_ops.stop		= tcp_seq_stop;
2188 
2189 	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2190 			     afinfo->seq_fops, afinfo);
2191 	if (!p)
2192 		rc = -ENOMEM;
2193 	return rc;
2194 }
2195 EXPORT_SYMBOL(tcp_proc_register);
2196 
2197 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2198 {
2199 	remove_proc_entry(afinfo->name, net->proc_net);
2200 }
2201 EXPORT_SYMBOL(tcp_proc_unregister);
2202 
2203 static void get_openreq4(const struct request_sock *req,
2204 			 struct seq_file *f, int i)
2205 {
2206 	const struct inet_request_sock *ireq = inet_rsk(req);
2207 	long delta = req->rsk_timer.expires - jiffies;
2208 
2209 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2210 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2211 		i,
2212 		ireq->ir_loc_addr,
2213 		ireq->ir_num,
2214 		ireq->ir_rmt_addr,
2215 		ntohs(ireq->ir_rmt_port),
2216 		TCP_SYN_RECV,
2217 		0, 0, /* could print option size, but that is af dependent. */
2218 		1,    /* timers active (only the expire timer) */
2219 		jiffies_delta_to_clock_t(delta),
2220 		req->num_timeout,
2221 		from_kuid_munged(seq_user_ns(f),
2222 				 sock_i_uid(req->rsk_listener)),
2223 		0,  /* non standard timer */
2224 		0, /* open_requests have no inode */
2225 		0,
2226 		req);
2227 }
2228 
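/* Editorial note: addresses and ports below are printed as raw hex, so on a
 * little-endian host 127.0.0.1:80 shows up in /proc/net/tcp roughly as
 * "0100007F:0050" (illustrative formatting only).
 */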
2229 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2230 {
2231 	int timer_active;
2232 	unsigned long timer_expires;
2233 	const struct tcp_sock *tp = tcp_sk(sk);
2234 	const struct inet_connection_sock *icsk = inet_csk(sk);
2235 	const struct inet_sock *inet = inet_sk(sk);
2236 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2237 	__be32 dest = inet->inet_daddr;
2238 	__be32 src = inet->inet_rcv_saddr;
2239 	__u16 destp = ntohs(inet->inet_dport);
2240 	__u16 srcp = ntohs(inet->inet_sport);
2241 	int rx_queue;
2242 	int state;
2243 
2244 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2245 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2246 	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2247 		timer_active	= 1;
2248 		timer_expires	= icsk->icsk_timeout;
2249 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2250 		timer_active	= 4;
2251 		timer_expires	= icsk->icsk_timeout;
2252 	} else if (timer_pending(&sk->sk_timer)) {
2253 		timer_active	= 2;
2254 		timer_expires	= sk->sk_timer.expires;
2255 	} else {
2256 		timer_active	= 0;
2257 		timer_expires = jiffies;
2258 	}
2259 
2260 	state = sk_state_load(sk);
2261 	if (state == TCP_LISTEN)
2262 		rx_queue = sk->sk_ack_backlog;
2263 	else
2264 		/* Because we don't lock the socket,
2265 		 * we might find a transient negative value.
2266 		 */
2267 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2268 
2269 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2270 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2271 		i, src, srcp, dest, destp, state,
2272 		tp->write_seq - tp->snd_una,
2273 		rx_queue,
2274 		timer_active,
2275 		jiffies_delta_to_clock_t(timer_expires - jiffies),
2276 		icsk->icsk_retransmits,
2277 		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2278 		icsk->icsk_probes_out,
2279 		sock_i_ino(sk),
2280 		atomic_read(&sk->sk_refcnt), sk,
2281 		jiffies_to_clock_t(icsk->icsk_rto),
2282 		jiffies_to_clock_t(icsk->icsk_ack.ato),
2283 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2284 		tp->snd_cwnd,
2285 		state == TCP_LISTEN ?
2286 		    fastopenq->max_qlen :
2287 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2288 }
2289 
2290 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2291 			       struct seq_file *f, int i)
2292 {
2293 	long delta = tw->tw_timer.expires - jiffies;
2294 	__be32 dest, src;
2295 	__u16 destp, srcp;
2296 
2297 	dest  = tw->tw_daddr;
2298 	src   = tw->tw_rcv_saddr;
2299 	destp = ntohs(tw->tw_dport);
2300 	srcp  = ntohs(tw->tw_sport);
2301 
2302 	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2303 		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2304 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2305 		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2306 		atomic_read(&tw->tw_refcnt), tw);
2307 }
2308 
2309 #define TMPSZ 150
2310 
2311 static int tcp4_seq_show(struct seq_file *seq, void *v)
2312 {
2313 	struct tcp_iter_state *st;
2314 	struct sock *sk = v;
2315 
2316 	seq_setwidth(seq, TMPSZ - 1);
2317 	if (v == SEQ_START_TOKEN) {
2318 		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2319 			   "rx_queue tr tm->when retrnsmt   uid  timeout "
2320 			   "inode");
2321 		goto out;
2322 	}
2323 	st = seq->private;
2324 
2325 	if (sk->sk_state == TCP_TIME_WAIT)
2326 		get_timewait4_sock(v, seq, st->num);
2327 	else if (sk->sk_state == TCP_NEW_SYN_RECV)
2328 		get_openreq4(v, seq, st->num);
2329 	else
2330 		get_tcp4_sock(v, seq, st->num);
2331 out:
2332 	seq_pad(seq, '\n');
2333 	return 0;
2334 }
2335 
2336 static const struct file_operations tcp_afinfo_seq_fops = {
2337 	.owner   = THIS_MODULE,
2338 	.open    = tcp_seq_open,
2339 	.read    = seq_read,
2340 	.llseek  = seq_lseek,
2341 	.release = seq_release_net
2342 };
2343 
2344 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2345 	.name		= "tcp",
2346 	.family		= AF_INET,
2347 	.seq_fops	= &tcp_afinfo_seq_fops,
2348 	.seq_ops	= {
2349 		.show		= tcp4_seq_show,
2350 	},
2351 };
2352 
2353 static int __net_init tcp4_proc_init_net(struct net *net)
2354 {
2355 	return tcp_proc_register(net, &tcp4_seq_afinfo);
2356 }
2357 
2358 static void __net_exit tcp4_proc_exit_net(struct net *net)
2359 {
2360 	tcp_proc_unregister(net, &tcp4_seq_afinfo);
2361 }
2362 
2363 static struct pernet_operations tcp4_net_ops = {
2364 	.init = tcp4_proc_init_net,
2365 	.exit = tcp4_proc_exit_net,
2366 };
2367 
2368 int __init tcp4_proc_init(void)
2369 {
2370 	return register_pernet_subsys(&tcp4_net_ops);
2371 }
2372 
2373 void tcp4_proc_exit(void)
2374 {
2375 	unregister_pernet_subsys(&tcp4_net_ops);
2376 }
2377 #endif /* CONFIG_PROC_FS */
2378 
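/* Editorial note: tcp_prot is the glue between the generic socket layer and
 * this file; it is registered elsewhere (typically from inet_init() in
 * af_inet.c) together with the corresponding inet protosw entry.
 */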
2379 struct proto tcp_prot = {
2380 	.name			= "TCP",
2381 	.owner			= THIS_MODULE,
2382 	.close			= tcp_close,
2383 	.connect		= tcp_v4_connect,
2384 	.disconnect		= tcp_disconnect,
2385 	.accept			= inet_csk_accept,
2386 	.ioctl			= tcp_ioctl,
2387 	.init			= tcp_v4_init_sock,
2388 	.destroy		= tcp_v4_destroy_sock,
2389 	.shutdown		= tcp_shutdown,
2390 	.setsockopt		= tcp_setsockopt,
2391 	.getsockopt		= tcp_getsockopt,
2392 	.recvmsg		= tcp_recvmsg,
2393 	.sendmsg		= tcp_sendmsg,
2394 	.sendpage		= tcp_sendpage,
2395 	.backlog_rcv		= tcp_v4_do_rcv,
2396 	.release_cb		= tcp_release_cb,
2397 	.hash			= inet_hash,
2398 	.unhash			= inet_unhash,
2399 	.get_port		= inet_csk_get_port,
2400 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2401 	.stream_memory_free	= tcp_stream_memory_free,
2402 	.sockets_allocated	= &tcp_sockets_allocated,
2403 	.orphan_count		= &tcp_orphan_count,
2404 	.memory_allocated	= &tcp_memory_allocated,
2405 	.memory_pressure	= &tcp_memory_pressure,
2406 	.sysctl_mem		= sysctl_tcp_mem,
2407 	.sysctl_wmem		= sysctl_tcp_wmem,
2408 	.sysctl_rmem		= sysctl_tcp_rmem,
2409 	.max_header		= MAX_TCP_HEADER,
2410 	.obj_size		= sizeof(struct tcp_sock),
2411 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2412 	.twsk_prot		= &tcp_timewait_sock_ops,
2413 	.rsk_prot		= &tcp_request_sock_ops,
2414 	.h.hashinfo		= &tcp_hashinfo,
2415 	.no_autobind		= true,
2416 #ifdef CONFIG_COMPAT
2417 	.compat_setsockopt	= compat_tcp_setsockopt,
2418 	.compat_getsockopt	= compat_tcp_getsockopt,
2419 #endif
2420 	.diag_destroy		= tcp_abort,
2421 };
2422 EXPORT_SYMBOL(tcp_prot);
2423 
2424 static void __net_exit tcp_sk_exit(struct net *net)
2425 {
2426 	int cpu;
2427 
2428 	for_each_possible_cpu(cpu)
2429 		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2430 	free_percpu(net->ipv4.tcp_sk);
2431 }
2432 
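/* Editorial note: tcp_sk_init() gives every network namespace its own set of
 * TCP sysctl defaults (visible under /proc/sys/net/ipv4/, e.g. tcp_ecn,
 * tcp_syncookies, tcp_fin_timeout) and a per-CPU control socket used for
 * sending replies, such as resets, that are not tied to a full socket.
 */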
2433 static int __net_init tcp_sk_init(struct net *net)
2434 {
2435 	int res, cpu;
2436 
2437 	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2438 	if (!net->ipv4.tcp_sk)
2439 		return -ENOMEM;
2440 
2441 	for_each_possible_cpu(cpu) {
2442 		struct sock *sk;
2443 
2444 		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2445 					   IPPROTO_TCP, net);
2446 		if (res)
2447 			goto fail;
2448 		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2449 		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2450 	}
2451 
2452 	net->ipv4.sysctl_tcp_ecn = 2;
2453 	net->ipv4.sysctl_tcp_ecn_fallback = 1;
2454 
2455 	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2456 	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2457 	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2458 
2459 	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2460 	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2461 	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2462 
2463 	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2464 	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2465 	net->ipv4.sysctl_tcp_syncookies = 1;
2466 	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2467 	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2468 	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2469 	net->ipv4.sysctl_tcp_orphan_retries = 0;
2470 	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2471 	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
2472 
2473 	return 0;
2474 fail:
2475 	tcp_sk_exit(net);
2476 
2477 	return res;
2478 }
2479 
2480 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2481 {
2482 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2483 }
2484 
2485 static struct pernet_operations __net_initdata tcp_sk_ops = {
2486        .init	   = tcp_sk_init,
2487        .exit	   = tcp_sk_exit,
2488        .exit_batch = tcp_sk_exit_batch,
2489 };
2490 
2491 void __init tcp_v4_init(void)
2492 {
2493 	inet_hashinfo_init(&tcp_hashinfo);
2494 	if (register_pernet_subsys(&tcp_sk_ops))
2495 		panic("Failed to create the TCP control socket.\n");
2496 }
2497