1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * IPv4 specific functions
9 *
10 *
11 * code split from:
12 * linux/ipv4/tcp.c
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
15 *
16 * See tcp.c for author information
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24 /*
25 * Changes:
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
51 */
52
53 #define pr_fmt(fmt) "TCP: " fmt
54
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
65
66 #include <net/net_namespace.h>
67 #include <net/icmp.h>
68 #include <net/inet_hashtables.h>
69 #include <net/tcp.h>
70 #include <net/transp_v6.h>
71 #include <net/ipv6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
74 #include <net/xfrm.h>
75 #include <net/secure_seq.h>
76 #include <net/tcp_memcontrol.h>
77 #include <net/busy_poll.h>
78
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
84
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
87
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
91
92 #ifdef CONFIG_TCP_MD5SIG
93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
94 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 #endif
96
97 struct inet_hashinfo tcp_hashinfo;
98 EXPORT_SYMBOL(tcp_hashinfo);
99
100 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 ip_hdr(skb)->saddr,
104 tcp_hdr(skb)->dest,
105 tcp_hdr(skb)->source);
106 }
107
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111 struct tcp_sock *tp = tcp_sk(sk);
112
113 /* With PAWS, it is safe from the viewpoint
114 of data integrity. Even without PAWS it is safe provided sequence
115 spaces do not overlap, i.e. at data rates <= 80Mbit/sec.
116 
117 Actually, the idea is close to VJ's one, only the timestamp cache is
118 held not per host, but per port pair, and the TW bucket is used as
119 the state holder.
120 
121 If the TW bucket has already been destroyed we fall back to VJ's
122 scheme and use the initial timestamp retrieved from the peer table.
123 */
124 if (tcptw->tw_ts_recent_stamp &&
125 (twp == NULL || (sysctl_tcp_tw_reuse &&
126 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128 if (tp->write_seq == 0)
129 tp->write_seq = 1;
130 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
131 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132 sock_hold(sktw);
133 return 1;
134 }
135
136 return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
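
/*
 * A minimal sketch of the reuse test above, with the kernel types replaced
 * by plain integers (hypothetical helper, compiled out): a TIME-WAIT socket
 * may be reused when it still carries a recent timestamp and either the
 * caller is not binding (twp == NULL) or tcp_tw_reuse is set and at least
 * one second has elapsed, so PAWS can reject stray old segments.
 */
#if 0
static int tw_reusable(unsigned long ts_recent_stamp, unsigned long now,
		       int have_twp, int tw_reuse)
{
	if (!ts_recent_stamp)	/* no timestamp state: cannot prove safety */
		return 0;
	if (!have_twp)		/* lookup path rather than bind: always ok */
		return 1;
	/* bind path: need the sysctl and a full second of elapsed time */
	return tw_reuse && now - ts_recent_stamp > 1;
}
#endif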
139
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144 struct inet_sock *inet = inet_sk(sk);
145 struct tcp_sock *tp = tcp_sk(sk);
146 __be16 orig_sport, orig_dport;
147 __be32 daddr, nexthop;
148 struct flowi4 *fl4;
149 struct rtable *rt;
150 int err;
151 struct ip_options_rcu *inet_opt;
152
153 if (addr_len < sizeof(struct sockaddr_in))
154 return -EINVAL;
155
156 if (usin->sin_family != AF_INET)
157 return -EAFNOSUPPORT;
158
159 nexthop = daddr = usin->sin_addr.s_addr;
160 inet_opt = rcu_dereference_protected(inet->inet_opt,
161 sock_owned_by_user(sk));
162 if (inet_opt && inet_opt->opt.srr) {
163 if (!daddr)
164 return -EINVAL;
165 nexthop = inet_opt->opt.faddr;
166 }
167
168 orig_sport = inet->inet_sport;
169 orig_dport = usin->sin_port;
170 fl4 = &inet->cork.fl.u.ip4;
171 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173 IPPROTO_TCP,
174 orig_sport, orig_dport, sk);
175 if (IS_ERR(rt)) {
176 err = PTR_ERR(rt);
177 if (err == -ENETUNREACH)
178 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179 return err;
180 }
181
182 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183 ip_rt_put(rt);
184 return -ENETUNREACH;
185 }
186
187 if (!inet_opt || !inet_opt->opt.srr)
188 daddr = fl4->daddr;
189
190 if (!inet->inet_saddr)
191 inet->inet_saddr = fl4->saddr;
192 inet->inet_rcv_saddr = inet->inet_saddr;
193
194 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195 /* Reset inherited state */
196 tp->rx_opt.ts_recent = 0;
197 tp->rx_opt.ts_recent_stamp = 0;
198 if (likely(!tp->repair))
199 tp->write_seq = 0;
200 }
201
202 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204 tcp_fetch_timewait_stamp(sk, &rt->dst);
205
206 inet->inet_dport = usin->sin_port;
207 inet->inet_daddr = daddr;
208
209 inet_csk(sk)->icsk_ext_hdr_len = 0;
210 if (inet_opt)
211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212
213 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214
215 /* Socket identity is still unknown (sport may be zero).
216 * However we set state to SYN-SENT and, without releasing the socket
217 * lock, select a source port, enter ourselves into the hash tables and
218 * complete initialization after this.
219 */
220 tcp_set_state(sk, TCP_SYN_SENT);
221 err = inet_hash_connect(&tcp_death_row, sk);
222 if (err)
223 goto failure;
224
225 inet_set_txhash(sk);
226
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
229 if (IS_ERR(rt)) {
230 err = PTR_ERR(rt);
231 rt = NULL;
232 goto failure;
233 }
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
237
238 if (!tp->write_seq && likely(!tp->repair))
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 inet->inet_daddr,
241 inet->inet_sport,
242 usin->sin_port);
243
244 inet->inet_id = tp->write_seq ^ jiffies;
245
246 err = tcp_connect(sk);
247
248 rt = NULL;
249 if (err)
250 goto failure;
251
252 return 0;
253
254 failure:
255 /*
256 * This unhashes the socket and releases the local port,
257 * if necessary.
258 */
259 tcp_set_state(sk, TCP_CLOSE);
260 ip_rt_put(rt);
261 sk->sk_route_caps = 0;
262 inet->inet_dport = 0;
263 return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
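
/*
 * For context, a minimal userspace program that exercises this path through
 * connect(2). Illustrative sketch only (address and port are examples);
 * it is not part of the kernel build.
 */
#if 0
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;	/* anything else gets -EAFNOSUPPORT */
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);

	/* Drives tcp_v4_connect(): route lookup, SYN-SENT, hash insertion. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}
#endif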
266
267 /*
268 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269 * It can be called through tcp_release_cb() if socket was owned by user
270 * at the time tcp_v4_err() was called to handle ICMP message.
271 */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274 struct inet_sock *inet = inet_sk(sk);
275 struct dst_entry *dst;
276 u32 mtu;
277
278 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
279 return;
280 mtu = tcp_sk(sk)->mtu_info;
281 dst = inet_csk_update_pmtu(sk, mtu);
282 if (!dst)
283 return;
284
285 /* Something is about to go wrong... Remember the soft error
286 * in case this connection is not able to recover.
287 */
288 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
289 sk->sk_err_soft = EMSGSIZE;
290
291 mtu = dst_mtu(dst);
292
293 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
294 ip_sk_accept_pmtu(sk) &&
295 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
296 tcp_sync_mss(sk, mtu);
297
298 /* Resend the TCP packet because it's
299 * clear that the old packet has been
300 * dropped. This is the new "fast" path mtu
301 * discovery.
302 */
303 tcp_simple_retransmit(sk);
304 } /* else let the usual retransmit timer handle it */
305 }
306 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
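
/*
 * The shrink test above, restated as a standalone predicate (illustrative
 * sketch with plain types; pmtu_cookie mirrors icsk_pmtu_cookie): only
 * re-sync the MSS and fast-retransmit when PMTU discovery is allowed for
 * the socket and the cached path MTU exceeds the newly reported one.
 */
#if 0
static int should_shrink_mss(int pmtudisc_is_dont, int accepts_pmtu,
			     unsigned int pmtu_cookie, unsigned int new_mtu)
{
	return !pmtudisc_is_dont && accepts_pmtu && pmtu_cookie > new_mtu;
}
#endif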
307
308 static void do_redirect(struct sk_buff *skb, struct sock *sk)
309 {
310 struct dst_entry *dst = __sk_dst_check(sk, 0);
311
312 if (dst)
313 dst->ops->redirect(dst, sk, skb);
314 }
315
316 /*
317 * This routine is called by the ICMP module when it gets some
318 * sort of error condition. If err < 0 then the socket should
319 * be closed and the error returned to the user. If err > 0
320 * it's just the icmp type << 8 | icmp code. After adjustment,
321 * the header points to the first 8 bytes of the tcp header. We need
322 * to find the appropriate port.
323 *
324 * The locking strategy used here is very "optimistic". When
325 * someone else accesses the socket, the ICMP is just dropped,
326 * and for some paths there is no check at all.
327 * A more general error queue to queue errors for later handling
328 * is probably better.
329 *
330 */
331
332 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
333 {
334 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
335 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
336 struct inet_connection_sock *icsk;
337 struct tcp_sock *tp;
338 struct inet_sock *inet;
339 const int type = icmp_hdr(icmp_skb)->type;
340 const int code = icmp_hdr(icmp_skb)->code;
341 struct sock *sk;
342 struct sk_buff *skb;
343 struct request_sock *fastopen;
344 __u32 seq, snd_una;
345 __u32 remaining;
346 int err;
347 struct net *net = dev_net(icmp_skb->dev);
348
349 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
350 iph->saddr, th->source, inet_iif(icmp_skb));
351 if (!sk) {
352 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
353 return;
354 }
355 if (sk->sk_state == TCP_TIME_WAIT) {
356 inet_twsk_put(inet_twsk(sk));
357 return;
358 }
359
360 bh_lock_sock(sk);
361 /* If too many ICMPs get dropped on busy
362 * servers this needs to be solved differently.
363 * We do take care of the PMTU discovery (RFC1191) special case:
364 * we can receive locally generated ICMP messages while the socket is held.
365 */
366 if (sock_owned_by_user(sk)) {
367 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
368 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
369 }
370 if (sk->sk_state == TCP_CLOSE)
371 goto out;
372
373 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
375 goto out;
376 }
377
378 icsk = inet_csk(sk);
379 tp = tcp_sk(sk);
380 seq = ntohl(th->seq);
381 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
382 fastopen = tp->fastopen_rsk;
383 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
384 if (sk->sk_state != TCP_LISTEN &&
385 !between(seq, snd_una, tp->snd_nxt)) {
386 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
387 goto out;
388 }
389
390 switch (type) {
391 case ICMP_REDIRECT:
392 if (!sock_owned_by_user(sk))
393 do_redirect(icmp_skb, sk);
394 goto out;
395 case ICMP_SOURCE_QUENCH:
396 /* Just silently ignore these. */
397 goto out;
398 case ICMP_PARAMETERPROB:
399 err = EPROTO;
400 break;
401 case ICMP_DEST_UNREACH:
402 if (code > NR_ICMP_UNREACH)
403 goto out;
404
405 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
406 /* We are not interested in TCP_LISTEN and open_requests
407 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
408 * they should go through unfragmented).
409 */
410 if (sk->sk_state == TCP_LISTEN)
411 goto out;
412
413 tp->mtu_info = info;
414 if (!sock_owned_by_user(sk)) {
415 tcp_v4_mtu_reduced(sk);
416 } else {
417 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
418 sock_hold(sk);
419 }
420 goto out;
421 }
422
423 err = icmp_err_convert[code].errno;
424 /* check if icmp_skb allows revert of backoff
425 * (see draft-zimmermann-tcp-lcd) */
426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 break;
428 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
429 !icsk->icsk_backoff || fastopen)
430 break;
431
432 if (sock_owned_by_user(sk))
433 break;
434
435 icsk->icsk_backoff--;
436 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
437 TCP_TIMEOUT_INIT;
438 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
439
440 skb = tcp_write_queue_head(sk);
441 BUG_ON(!skb);
442
443 remaining = icsk->icsk_rto -
444 min(icsk->icsk_rto,
445 tcp_time_stamp - tcp_skb_timestamp(skb));
446
447 if (remaining) {
448 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
449 remaining, TCP_RTO_MAX);
450 } else {
451 /* The RTO revert clocked out the retransmission.
452 * Retransmit now. */
453 tcp_retransmit_timer(sk);
454 }
455
456 break;
457 case ICMP_TIME_EXCEEDED:
458 err = EHOSTUNREACH;
459 break;
460 default:
461 goto out;
462 }
463
464 switch (sk->sk_state) {
465 struct request_sock *req, **prev;
466 case TCP_LISTEN:
467 if (sock_owned_by_user(sk))
468 goto out;
469
470 req = inet_csk_search_req(sk, &prev, th->dest,
471 iph->daddr, iph->saddr);
472 if (!req)
473 goto out;
474
475 /* ICMPs are not backlogged, hence we cannot get
476 an established socket here.
477 */
478 WARN_ON(req->sk);
479
480 if (seq != tcp_rsk(req)->snt_isn) {
481 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
482 goto out;
483 }
484
485 /*
486 * Still in SYN_RECV, just remove it silently.
487 * There is no good way to pass the error to the newly
488 * created socket, and POSIX does not want network
489 * errors returned from accept().
490 */
491 inet_csk_reqsk_queue_drop(sk, req, prev);
492 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
493 goto out;
494
495 case TCP_SYN_SENT:
496 case TCP_SYN_RECV:
497 /* Only in fast or simultaneous open. If a fast open socket is
498 * already accepted, it is treated as a connected one below.
499 */
500 if (fastopen && fastopen->sk == NULL)
501 break;
502
503 if (!sock_owned_by_user(sk)) {
504 sk->sk_err = err;
505
506 sk->sk_error_report(sk);
507
508 tcp_done(sk);
509 } else {
510 sk->sk_err_soft = err;
511 }
512 goto out;
513 }
514
515 /* If we've already connected we will keep trying
516 * until we time out, or the user gives up.
517 *
518 * rfc1122 4.2.3.9 allows us to consider as hard errors
519 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
520 * but it is obsoleted by pmtu discovery).
521 *
522 * Note that in the modern internet, where routing is unreliable
523 * and broken firewalls sit in each dark corner sending random
524 * errors ordered by their masters, even these two messages have finally
525 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
526 *
527 * Now we are in compliance with RFCs.
528 * --ANK (980905)
529 */
530
531 inet = inet_sk(sk);
532 if (!sock_owned_by_user(sk) && inet->recverr) {
533 sk->sk_err = err;
534 sk->sk_error_report(sk);
535 } else { /* Only an error on timeout */
536 sk->sk_err_soft = err;
537 }
538
539 out:
540 bh_unlock_sock(sk);
541 sock_put(sk);
542 }
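
/*
 * The "remaining" computation in the backoff-revert branch above, in
 * isolation (illustrative sketch): after undoing one backoff step, re-arm
 * the retransmit timer with whatever is left of the reduced RTO; if the
 * head skb has already waited at least that long, retransmit immediately.
 */
#if 0
static u32 rto_remaining(u32 rto, u32 now, u32 skb_tstamp)
{
	u32 elapsed = now - skb_tstamp;

	return rto - (elapsed < rto ? elapsed : rto);	/* 0 means send now */
}
#endif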
543
544 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
545 {
546 struct tcphdr *th = tcp_hdr(skb);
547
548 if (skb->ip_summed == CHECKSUM_PARTIAL) {
549 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
550 skb->csum_start = skb_transport_header(skb) - skb->head;
551 skb->csum_offset = offsetof(struct tcphdr, check);
552 } else {
553 th->check = tcp_v4_check(skb->len, saddr, daddr,
554 csum_partial(th,
555 th->doff << 2,
556 skb->csum));
557 }
558 }
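
/*
 * For reference, the checksum the else-branch above produces, computed from
 * scratch in portable C over the pseudo-header and segment bytes. This is
 * an illustrative sketch (addresses taken in host byte order); the kernel
 * uses the optimized csum_partial()/tcp_v4_check() helpers instead.
 */
#if 0
static unsigned short tcp4_csum(unsigned int saddr, unsigned int daddr,
				const unsigned char *seg, int len)
{
	unsigned long sum = 0;
	int i;

	/* Pseudo-header: saddr, daddr, zero pad, protocol 6, TCP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6 + len;

	/* TCP header (checksum field pre-zeroed) plus payload. */
	for (i = 0; i + 1 < len; i += 2)
		sum += (seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += seg[len - 1] << 8;

	while (sum >> 16)	/* fold carries back into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum & 0xffff;	/* one's complement of the final sum */
}
#endif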
559
560 /* This routine computes an IPv4 TCP checksum. */
561 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
562 {
563 const struct inet_sock *inet = inet_sk(sk);
564
565 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
566 }
567 EXPORT_SYMBOL(tcp_v4_send_check);
568
569 /*
570 * This routine will send an RST to the other tcp.
571 *
572 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
573 * for the reset?
574 * Answer: if a packet caused a RST, it is not for a socket
575 * existing in our system; if it is matched to a socket,
576 * it is just a duplicate segment or a bug in the other side's TCP.
577 * So we build the reply based only on the parameters
578 * that arrived with the segment.
579 * Exception: precedence violation. We do not implement it in any case.
580 */
581
582 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
583 {
584 const struct tcphdr *th = tcp_hdr(skb);
585 struct {
586 struct tcphdr th;
587 #ifdef CONFIG_TCP_MD5SIG
588 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
589 #endif
590 } rep;
591 struct ip_reply_arg arg;
592 #ifdef CONFIG_TCP_MD5SIG
593 struct tcp_md5sig_key *key;
594 const __u8 *hash_location = NULL;
595 unsigned char newhash[16];
596 int genhash;
597 struct sock *sk1 = NULL;
598 #endif
599 struct net *net;
600
601 /* Never send a reset in response to a reset. */
602 if (th->rst)
603 return;
604
605 /* If sk is not NULL, it means we did a successful lookup and the incoming
606 * route had to be correct. prequeue might have dropped our dst.
607 */
608 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
609 return;
610
611 /* Swap the send and the receive. */
612 memset(&rep, 0, sizeof(rep));
613 rep.th.dest = th->source;
614 rep.th.source = th->dest;
615 rep.th.doff = sizeof(struct tcphdr) / 4;
616 rep.th.rst = 1;
617
618 if (th->ack) {
619 rep.th.seq = th->ack_seq;
620 } else {
621 rep.th.ack = 1;
622 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
623 skb->len - (th->doff << 2));
624 }
625
626 memset(&arg, 0, sizeof(arg));
627 arg.iov[0].iov_base = (unsigned char *)&rep;
628 arg.iov[0].iov_len = sizeof(rep.th);
629
630 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
631 #ifdef CONFIG_TCP_MD5SIG
632 hash_location = tcp_parse_md5sig_option(th);
633 if (!sk && hash_location) {
634 /*
635 * The active side is lost. Try to find the listening socket through
636 * the source port, and then find the md5 key through the listening
637 * socket. We do not lose security here:
638 * the incoming packet is checked with the md5 hash of the found key;
639 * no RST is generated if the md5 hash doesn't match.
640 */
641 sk1 = __inet_lookup_listener(net,
642 &tcp_hashinfo, ip_hdr(skb)->saddr,
643 th->source, ip_hdr(skb)->daddr,
644 ntohs(th->source), inet_iif(skb));
645 /* don't send rst if it can't find key */
646 if (!sk1)
647 return;
648 rcu_read_lock();
649 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
650 &ip_hdr(skb)->saddr, AF_INET);
651 if (!key)
652 goto release_sk1;
653
654 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
655 if (genhash || memcmp(hash_location, newhash, 16) != 0)
656 goto release_sk1;
657 } else {
658 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
659 &ip_hdr(skb)->saddr,
660 AF_INET) : NULL;
661 }
662
663 if (key) {
664 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
665 (TCPOPT_NOP << 16) |
666 (TCPOPT_MD5SIG << 8) |
667 TCPOLEN_MD5SIG);
668 /* Update length and the length the header thinks exists */
669 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
670 rep.th.doff = arg.iov[0].iov_len / 4;
671
672 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
673 key, ip_hdr(skb)->saddr,
674 ip_hdr(skb)->daddr, &rep.th);
675 }
676 #endif
677 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
678 ip_hdr(skb)->saddr, /* XXX */
679 arg.iov[0].iov_len, IPPROTO_TCP, 0);
680 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
681 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
682 /* When the socket is gone, all binding information is lost.
683 * Routing might fail in this case. No choice here: if we choose to force
684 * the input interface, we will misroute in case of an asymmetric route.
685 */
686 if (sk)
687 arg.bound_dev_if = sk->sk_bound_dev_if;
688
689 arg.tos = ip_hdr(skb)->tos;
690 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
691 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
692 skb, &TCP_SKB_CB(skb)->header.h4.opt,
693 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
694 &arg, arg.iov[0].iov_len);
695
696 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
697 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
698
699 #ifdef CONFIG_TCP_MD5SIG
700 release_sk1:
701 if (sk1) {
702 rcu_read_unlock();
703 sock_put(sk1);
704 }
705 #endif
706 }
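
/*
 * The seq/ack selection above follows RFC 793 reset generation: if the
 * offending segment carried an ACK, the RST reuses that ack number as its
 * own sequence; otherwise the RST ACKs everything the segment occupied in
 * sequence space (SYN and FIN each count as one octet). Sketch only:
 */
#if 0
static void rst_numbers(int had_ack, u32 seg_seq, u32 seg_ack, u32 payload_len,
			int syn, int fin, u32 *rst_seq, u32 *rst_ack)
{
	if (had_ack) {
		*rst_seq = seg_ack;	/* and no ACK bit on the RST */
	} else {
		*rst_seq = 0;
		*rst_ack = seg_seq + syn + fin + payload_len;
	}
}
#endif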
707
708 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
709 outside of socket context, is certainly ugly. What can I do?
710 */
711
712 static void tcp_v4_send_ack(const struct sock *sk, struct sk_buff *skb,
713 u32 seq, u32 ack,
714 u32 win, u32 tsval, u32 tsecr, int oif,
715 struct tcp_md5sig_key *key,
716 int reply_flags, u8 tos)
717 {
718 const struct tcphdr *th = tcp_hdr(skb);
719 struct {
720 struct tcphdr th;
721 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
722 #ifdef CONFIG_TCP_MD5SIG
723 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
724 #endif
725 ];
726 } rep;
727 struct ip_reply_arg arg;
728 struct net *net = sock_net(sk);
729
730 memset(&rep.th, 0, sizeof(struct tcphdr));
731 memset(&arg, 0, sizeof(arg));
732
733 arg.iov[0].iov_base = (unsigned char *)&rep;
734 arg.iov[0].iov_len = sizeof(rep.th);
735 if (tsecr) {
736 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
737 (TCPOPT_TIMESTAMP << 8) |
738 TCPOLEN_TIMESTAMP);
739 rep.opt[1] = htonl(tsval);
740 rep.opt[2] = htonl(tsecr);
741 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
742 }
743
744 /* Swap the send and the receive. */
745 rep.th.dest = th->source;
746 rep.th.source = th->dest;
747 rep.th.doff = arg.iov[0].iov_len / 4;
748 rep.th.seq = htonl(seq);
749 rep.th.ack_seq = htonl(ack);
750 rep.th.ack = 1;
751 rep.th.window = htons(win);
752
753 #ifdef CONFIG_TCP_MD5SIG
754 if (key) {
755 int offset = (tsecr) ? 3 : 0;
756
757 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
758 (TCPOPT_NOP << 16) |
759 (TCPOPT_MD5SIG << 8) |
760 TCPOLEN_MD5SIG);
761 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
762 rep.th.doff = arg.iov[0].iov_len/4;
763
764 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
765 key, ip_hdr(skb)->saddr,
766 ip_hdr(skb)->daddr, &rep.th);
767 }
768 #endif
769 arg.flags = reply_flags;
770 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
771 ip_hdr(skb)->saddr, /* XXX */
772 arg.iov[0].iov_len, IPPROTO_TCP, 0);
773 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
774 if (oif)
775 arg.bound_dev_if = oif;
776 arg.tos = tos;
777 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
778 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
779 skb, &TCP_SKB_CB(skb)->header.h4.opt,
780 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
781 &arg, arg.iov[0].iov_len);
782
783 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
784 }
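
/*
 * The rep.opt[] words above encode the 12-byte timestamp option exactly as
 * RFC 7323 lays it out: two NOPs for alignment, then kind 8 and length 10,
 * followed by TSval and TSecr. Spelled out with literal values (sketch):
 */
#if 0
static void build_tsopt(u32 *opt, u32 tsval, u32 tsecr)
{
	opt[0] = htonl((1 << 24) | (1 << 16) |	/* TCPOPT_NOP, TCPOPT_NOP   */
		       (8 << 8) | 10);		/* TCPOPT_TIMESTAMP, len 10 */
	opt[1] = htonl(tsval);
	opt[2] = htonl(tsecr);
}
#endif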
785
786 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
787 {
788 struct inet_timewait_sock *tw = inet_twsk(sk);
789 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
790
791 tcp_v4_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
792 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
793 tcp_time_stamp + tcptw->tw_ts_offset,
794 tcptw->tw_ts_recent,
795 tw->tw_bound_dev_if,
796 tcp_twsk_md5_key(tcptw),
797 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
798 tw->tw_tos
799 );
800
801 inet_twsk_put(tw);
802 }
803
804 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
805 struct request_sock *req)
806 {
807 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
808 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
809 */
810 tcp_v4_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
811 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
812 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
813 tcp_time_stamp,
814 req->ts_recent,
815 0,
816 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
817 AF_INET),
818 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
819 ip_hdr(skb)->tos);
820 }
821
822 /*
823 * Send a SYN-ACK after having received a SYN.
824 * This still operates on a request_sock only, not on a big
825 * socket.
826 */
827 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
828 struct flowi *fl,
829 struct request_sock *req,
830 u16 queue_mapping,
831 struct tcp_fastopen_cookie *foc)
832 {
833 const struct inet_request_sock *ireq = inet_rsk(req);
834 struct flowi4 fl4;
835 int err = -1;
836 struct sk_buff *skb;
837
838 /* First, grab a route. */
839 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
840 return -1;
841
842 skb = tcp_make_synack(sk, dst, req, foc);
843
844 if (skb) {
845 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
846
847 skb_set_queue_mapping(skb, queue_mapping);
848 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
849 ireq->ir_rmt_addr,
850 ireq->opt);
851 err = net_xmit_eval(err);
852 }
853
854 return err;
855 }
856
857 /*
858 * IPv4 request_sock destructor.
859 */
860 static void tcp_v4_reqsk_destructor(struct request_sock *req)
861 {
862 kfree(inet_rsk(req)->opt);
863 }
864
865 /*
866 * Return true if a syncookie should be sent
867 */
868 bool tcp_syn_flood_action(struct sock *sk,
869 const struct sk_buff *skb,
870 const char *proto)
871 {
872 const char *msg = "Dropping request";
873 bool want_cookie = false;
874 struct listen_sock *lopt;
875
876 #ifdef CONFIG_SYN_COOKIES
877 if (sysctl_tcp_syncookies) {
878 msg = "Sending cookies";
879 want_cookie = true;
880 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
881 } else
882 #endif
883 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
884
885 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
886 if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
887 lopt->synflood_warned = 1;
888 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
889 proto, ntohs(tcp_hdr(skb)->dest), msg);
890 }
891 return want_cookie;
892 }
893 EXPORT_SYMBOL(tcp_syn_flood_action);
894
895 #ifdef CONFIG_TCP_MD5SIG
896 /*
897 * RFC2385 MD5 checksumming requires a mapping of
898 * IP address->MD5 Key.
899 * We need to maintain these in the sk structure.
900 */
901
902 /* Find the Key structure for an address. */
903 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
904 const union tcp_md5_addr *addr,
905 int family)
906 {
907 struct tcp_sock *tp = tcp_sk(sk);
908 struct tcp_md5sig_key *key;
909 unsigned int size = sizeof(struct in_addr);
910 struct tcp_md5sig_info *md5sig;
911
912 /* caller either holds rcu_read_lock() or socket lock */
913 md5sig = rcu_dereference_check(tp->md5sig_info,
914 sock_owned_by_user(sk) ||
915 lockdep_is_held(&sk->sk_lock.slock));
916 if (!md5sig)
917 return NULL;
918 #if IS_ENABLED(CONFIG_IPV6)
919 if (family == AF_INET6)
920 size = sizeof(struct in6_addr);
921 #endif
922 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
923 if (key->family != family)
924 continue;
925 if (!memcmp(&key->addr, addr, size))
926 return key;
927 }
928 return NULL;
929 }
930 EXPORT_SYMBOL(tcp_md5_do_lookup);
931
932 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
933 struct sock *addr_sk)
934 {
935 union tcp_md5_addr *addr;
936
937 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
938 return tcp_md5_do_lookup(sk, addr, AF_INET);
939 }
940 EXPORT_SYMBOL(tcp_v4_md5_lookup);
941
942 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
943 struct request_sock *req)
944 {
945 union tcp_md5_addr *addr;
946
947 addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
948 return tcp_md5_do_lookup(sk, addr, AF_INET);
949 }
950
951 /* This can be called on a newly created socket, from other files */
952 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
953 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
954 {
955 /* Add Key to the list */
956 struct tcp_md5sig_key *key;
957 struct tcp_sock *tp = tcp_sk(sk);
958 struct tcp_md5sig_info *md5sig;
959
960 key = tcp_md5_do_lookup(sk, addr, family);
961 if (key) {
962 /* Pre-existing entry - just update that one. */
963 memcpy(key->key, newkey, newkeylen);
964 key->keylen = newkeylen;
965 return 0;
966 }
967
968 md5sig = rcu_dereference_protected(tp->md5sig_info,
969 sock_owned_by_user(sk) ||
970 lockdep_is_held(&sk->sk_lock.slock));
971 if (!md5sig) {
972 md5sig = kmalloc(sizeof(*md5sig), gfp);
973 if (!md5sig)
974 return -ENOMEM;
975
976 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
977 INIT_HLIST_HEAD(&md5sig->head);
978 rcu_assign_pointer(tp->md5sig_info, md5sig);
979 }
980
981 key = sock_kmalloc(sk, sizeof(*key), gfp);
982 if (!key)
983 return -ENOMEM;
984 if (!tcp_alloc_md5sig_pool()) {
985 sock_kfree_s(sk, key, sizeof(*key));
986 return -ENOMEM;
987 }
988
989 memcpy(key->key, newkey, newkeylen);
990 key->keylen = newkeylen;
991 key->family = family;
992 memcpy(&key->addr, addr,
993 (family == AF_INET6) ? sizeof(struct in6_addr) :
994 sizeof(struct in_addr));
995 hlist_add_head_rcu(&key->node, &md5sig->head);
996 return 0;
997 }
998 EXPORT_SYMBOL(tcp_md5_do_add);
999
1000 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1001 {
1002 struct tcp_md5sig_key *key;
1003
1004 key = tcp_md5_do_lookup(sk, addr, family);
1005 if (!key)
1006 return -ENOENT;
1007 hlist_del_rcu(&key->node);
1008 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1009 kfree_rcu(key, rcu);
1010 return 0;
1011 }
1012 EXPORT_SYMBOL(tcp_md5_do_del);
1013
1014 static void tcp_clear_md5_list(struct sock *sk)
1015 {
1016 struct tcp_sock *tp = tcp_sk(sk);
1017 struct tcp_md5sig_key *key;
1018 struct hlist_node *n;
1019 struct tcp_md5sig_info *md5sig;
1020
1021 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1022
1023 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1024 hlist_del_rcu(&key->node);
1025 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1026 kfree_rcu(key, rcu);
1027 }
1028 }
1029
1030 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1031 int optlen)
1032 {
1033 struct tcp_md5sig cmd;
1034 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1035
1036 if (optlen < sizeof(cmd))
1037 return -EINVAL;
1038
1039 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1040 return -EFAULT;
1041
1042 if (sin->sin_family != AF_INET)
1043 return -EINVAL;
1044
1045 if (!cmd.tcpm_keylen)
1046 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1047 AF_INET);
1048
1049 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1050 return -EINVAL;
1051
1052 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1053 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1054 GFP_KERNEL);
1055 }
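
/*
 * From userspace this parser is reached through the TCP_MD5SIG socket
 * option. A minimal sketch (peer address and key are examples; a zero
 * tcpm_keylen would delete the key, as handled above):
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, struct in_addr peer)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	sin->sin_addr = peer;
	md5.tcpm_keylen = 6;
	memcpy(md5.tcpm_key, "secret", 6);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif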
1056
1057 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1058 __be32 daddr, __be32 saddr, int nbytes)
1059 {
1060 struct tcp4_pseudohdr *bp;
1061 struct scatterlist sg;
1062
1063 bp = &hp->md5_blk.ip4;
1064
1065 /*
1066 * 1. the TCP pseudo-header (in the order: source IP address,
1067 * destination IP address, zero-padded protocol number, and
1068 * segment length)
1069 */
1070 bp->saddr = saddr;
1071 bp->daddr = daddr;
1072 bp->pad = 0;
1073 bp->protocol = IPPROTO_TCP;
1074 bp->len = cpu_to_be16(nbytes);
1075
1076 sg_init_one(&sg, bp, sizeof(*bp));
1077 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1078 }
1079
1080 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1081 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1082 {
1083 struct tcp_md5sig_pool *hp;
1084 struct hash_desc *desc;
1085
1086 hp = tcp_get_md5sig_pool();
1087 if (!hp)
1088 goto clear_hash_noput;
1089 desc = &hp->md5_desc;
1090
1091 if (crypto_hash_init(desc))
1092 goto clear_hash;
1093 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1094 goto clear_hash;
1095 if (tcp_md5_hash_header(hp, th))
1096 goto clear_hash;
1097 if (tcp_md5_hash_key(hp, key))
1098 goto clear_hash;
1099 if (crypto_hash_final(desc, md5_hash))
1100 goto clear_hash;
1101
1102 tcp_put_md5sig_pool();
1103 return 0;
1104
1105 clear_hash:
1106 tcp_put_md5sig_pool();
1107 clear_hash_noput:
1108 memset(md5_hash, 0, 16);
1109 return 1;
1110 }
1111
1112 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1113 const struct sock *sk, const struct request_sock *req,
1114 const struct sk_buff *skb)
1115 {
1116 struct tcp_md5sig_pool *hp;
1117 struct hash_desc *desc;
1118 const struct tcphdr *th = tcp_hdr(skb);
1119 __be32 saddr, daddr;
1120
1121 if (sk) {
1122 saddr = inet_sk(sk)->inet_saddr;
1123 daddr = inet_sk(sk)->inet_daddr;
1124 } else if (req) {
1125 saddr = inet_rsk(req)->ir_loc_addr;
1126 daddr = inet_rsk(req)->ir_rmt_addr;
1127 } else {
1128 const struct iphdr *iph = ip_hdr(skb);
1129 saddr = iph->saddr;
1130 daddr = iph->daddr;
1131 }
1132
1133 hp = tcp_get_md5sig_pool();
1134 if (!hp)
1135 goto clear_hash_noput;
1136 desc = &hp->md5_desc;
1137
1138 if (crypto_hash_init(desc))
1139 goto clear_hash;
1140
1141 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1142 goto clear_hash;
1143 if (tcp_md5_hash_header(hp, th))
1144 goto clear_hash;
1145 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1146 goto clear_hash;
1147 if (tcp_md5_hash_key(hp, key))
1148 goto clear_hash;
1149 if (crypto_hash_final(desc, md5_hash))
1150 goto clear_hash;
1151
1152 tcp_put_md5sig_pool();
1153 return 0;
1154
1155 clear_hash:
1156 tcp_put_md5sig_pool();
1157 clear_hash_noput:
1158 memset(md5_hash, 0, 16);
1159 return 1;
1160 }
1161 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
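
/*
 * Both hashing routines above apply the RFC 2385 input order. With
 * hypothetical md5_update()/md5_final() helpers standing in for the
 * crypto_hash_*() calls, the digest is computed over (sketch only):
 */
#if 0
	md5_update(ctx, pseudo_header);	/* saddr, daddr, 0, proto, len */
	md5_update(ctx, tcp_header);	/* with the checksum field zeroed */
	md5_update(ctx, payload);	/* skipped by the header-only variant */
	md5_update(ctx, key);		/* the configured MD5 key */
	md5_final(ctx, digest);		/* 16-byte result */
#endif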
1162
1163 static bool __tcp_v4_inbound_md5_hash(struct sock *sk,
1164 const struct sk_buff *skb)
1165 {
1166 /*
1167 * This gets called for each TCP segment that arrives,
1168 * so we want to be efficient.
1169 * We have 3 drop cases:
1170 * o No MD5 hash and one expected.
1171 * o MD5 hash and we're not expecting one.
1172 * o MD5 hash and it's wrong.
1173 */
1174 const __u8 *hash_location = NULL;
1175 struct tcp_md5sig_key *hash_expected;
1176 const struct iphdr *iph = ip_hdr(skb);
1177 const struct tcphdr *th = tcp_hdr(skb);
1178 int genhash;
1179 unsigned char newhash[16];
1180
1181 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1182 AF_INET);
1183 hash_location = tcp_parse_md5sig_option(th);
1184
1185 /* We've parsed the options - do we have a hash? */
1186 if (!hash_expected && !hash_location)
1187 return false;
1188
1189 if (hash_expected && !hash_location) {
1190 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1191 return true;
1192 }
1193
1194 if (!hash_expected && hash_location) {
1195 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1196 return true;
1197 }
1198
1199 /* Okay, so this is hash_expected and hash_location -
1200 * so we need to calculate the checksum.
1201 */
1202 genhash = tcp_v4_md5_hash_skb(newhash,
1203 hash_expected,
1204 NULL, NULL, skb);
1205
1206 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1207 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1208 &iph->saddr, ntohs(th->source),
1209 &iph->daddr, ntohs(th->dest),
1210 genhash ? " tcp_v4_calc_md5_hash failed"
1211 : "");
1212 return true;
1213 }
1214 return false;
1215 }
1216
1217 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1218 {
1219 bool ret;
1220
1221 rcu_read_lock();
1222 ret = __tcp_v4_inbound_md5_hash(sk, skb);
1223 rcu_read_unlock();
1224
1225 return ret;
1226 }
1227
1228 #endif
1229
1230 static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
1231 struct sk_buff *skb)
1232 {
1233 struct inet_request_sock *ireq = inet_rsk(req);
1234
1235 ireq->ir_loc_addr = ip_hdr(skb)->daddr;
1236 ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
1237 ireq->no_srccheck = inet_sk(sk)->transparent;
1238 ireq->opt = tcp_v4_save_options(skb);
1239 }
1240
1241 static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
1242 const struct request_sock *req,
1243 bool *strict)
1244 {
1245 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1246
1247 if (strict) {
1248 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1249 *strict = true;
1250 else
1251 *strict = false;
1252 }
1253
1254 return dst;
1255 }
1256
1257 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1258 .family = PF_INET,
1259 .obj_size = sizeof(struct tcp_request_sock),
1260 .rtx_syn_ack = tcp_rtx_synack,
1261 .send_ack = tcp_v4_reqsk_send_ack,
1262 .destructor = tcp_v4_reqsk_destructor,
1263 .send_reset = tcp_v4_send_reset,
1264 .syn_ack_timeout = tcp_syn_ack_timeout,
1265 };
1266
1267 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1268 .mss_clamp = TCP_MSS_DEFAULT,
1269 #ifdef CONFIG_TCP_MD5SIG
1270 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1271 .calc_md5_hash = tcp_v4_md5_hash_skb,
1272 #endif
1273 .init_req = tcp_v4_init_req,
1274 #ifdef CONFIG_SYN_COOKIES
1275 .cookie_init_seq = cookie_v4_init_sequence,
1276 #endif
1277 .route_req = tcp_v4_route_req,
1278 .init_seq = tcp_v4_init_sequence,
1279 .send_synack = tcp_v4_send_synack,
1280 .queue_hash_add = inet_csk_reqsk_queue_hash_add,
1281 };
1282
1283 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1284 {
1285 /* Never answer SYNs sent to broadcast or multicast addresses. */
1286 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1287 goto drop;
1288
1289 return tcp_conn_request(&tcp_request_sock_ops,
1290 &tcp_request_sock_ipv4_ops, sk, skb);
1291
1292 drop:
1293 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1294 return 0;
1295 }
1296 EXPORT_SYMBOL(tcp_v4_conn_request);
1297
1298
1299 /*
1300 * The three way handshake has completed - we got a valid synack -
1301 * now create the new socket.
1302 */
1303 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1304 struct request_sock *req,
1305 struct dst_entry *dst)
1306 {
1307 struct inet_request_sock *ireq;
1308 struct inet_sock *newinet;
1309 struct tcp_sock *newtp;
1310 struct sock *newsk;
1311 #ifdef CONFIG_TCP_MD5SIG
1312 struct tcp_md5sig_key *key;
1313 #endif
1314 struct ip_options_rcu *inet_opt;
1315
1316 if (sk_acceptq_is_full(sk))
1317 goto exit_overflow;
1318
1319 newsk = tcp_create_openreq_child(sk, req, skb);
1320 if (!newsk)
1321 goto exit_nonewsk;
1322
1323 newsk->sk_gso_type = SKB_GSO_TCPV4;
1324 inet_sk_rx_dst_set(newsk, skb);
1325
1326 newtp = tcp_sk(newsk);
1327 newinet = inet_sk(newsk);
1328 ireq = inet_rsk(req);
1329 newinet->inet_daddr = ireq->ir_rmt_addr;
1330 newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1331 newinet->inet_saddr = ireq->ir_loc_addr;
1332 inet_opt = ireq->opt;
1333 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1334 ireq->opt = NULL;
1335 newinet->mc_index = inet_iif(skb);
1336 newinet->mc_ttl = ip_hdr(skb)->ttl;
1337 newinet->rcv_tos = ip_hdr(skb)->tos;
1338 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1339 inet_set_txhash(newsk);
1340 if (inet_opt)
1341 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1342 newinet->inet_id = newtp->write_seq ^ jiffies;
1343
1344 if (!dst) {
1345 dst = inet_csk_route_child_sock(sk, newsk, req);
1346 if (!dst)
1347 goto put_and_exit;
1348 } else {
1349 /* syncookie case : see end of cookie_v4_check() */
1350 }
1351 sk_setup_caps(newsk, dst);
1352
1353 tcp_sync_mss(newsk, dst_mtu(dst));
1354 newtp->advmss = dst_metric_advmss(dst);
1355 if (tcp_sk(sk)->rx_opt.user_mss &&
1356 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1357 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1358
1359 tcp_initialize_rcv_mss(newsk);
1360
1361 #ifdef CONFIG_TCP_MD5SIG
1362 /* Copy over the MD5 key from the original socket */
1363 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1364 AF_INET);
1365 if (key != NULL) {
1366 /*
1367 * We're using one, so create a matching key
1368 * on the newsk structure. If we fail to get
1369 * memory, then we end up not copying the key
1370 * across. Shucks.
1371 */
1372 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1373 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1374 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1375 }
1376 #endif
1377
1378 if (__inet_inherit_port(sk, newsk) < 0)
1379 goto put_and_exit;
1380 __inet_hash_nolisten(newsk, NULL);
1381
1382 return newsk;
1383
1384 exit_overflow:
1385 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1386 exit_nonewsk:
1387 dst_release(dst);
1388 exit:
1389 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1390 return NULL;
1391 put_and_exit:
1392 inet_csk_prepare_forced_close(newsk);
1393 tcp_done(newsk);
1394 goto exit;
1395 }
1396 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1397
1398 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1399 {
1400 struct tcphdr *th = tcp_hdr(skb);
1401 const struct iphdr *iph = ip_hdr(skb);
1402 struct sock *nsk;
1403 struct request_sock **prev;
1404 /* Find possible connection requests. */
1405 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1406 iph->saddr, iph->daddr);
1407 if (req)
1408 return tcp_check_req(sk, skb, req, prev, false);
1409
1410 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1411 th->source, iph->daddr, th->dest, inet_iif(skb));
1412
1413 if (nsk) {
1414 if (nsk->sk_state != TCP_TIME_WAIT) {
1415 bh_lock_sock(nsk);
1416 return nsk;
1417 }
1418 inet_twsk_put(inet_twsk(nsk));
1419 return NULL;
1420 }
1421
1422 #ifdef CONFIG_SYN_COOKIES
1423 if (!th->syn)
1424 sk = cookie_v4_check(sk, skb);
1425 #endif
1426 return sk;
1427 }
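
/*
 * Note on the cookie check above: syncookies are validated on the returning
 * ACK (!th->syn), because the original SYN was answered statelessly and no
 * request_sock exists for it to match against.
 */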
1428
1429 /* The socket must have its spinlock held when we get
1430 * here.
1431 *
1432 * We have a potential double-lock case here, so even when
1433 * doing backlog processing we use the BH locking scheme.
1434 * This is because we cannot sleep with the original spinlock
1435 * held.
1436 */
1437 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1438 {
1439 struct sock *rsk;
1440
1441 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1442 struct dst_entry *dst = sk->sk_rx_dst;
1443
1444 sock_rps_save_rxhash(sk, skb);
1445 if (dst) {
1446 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1447 dst->ops->check(dst, 0) == NULL) {
1448 dst_release(dst);
1449 sk->sk_rx_dst = NULL;
1450 }
1451 }
1452 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1453 return 0;
1454 }
1455
1456 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1457 goto csum_err;
1458
1459 if (sk->sk_state == TCP_LISTEN) {
1460 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1461 if (!nsk)
1462 goto discard;
1463
1464 if (nsk != sk) {
1465 sock_rps_save_rxhash(nsk, skb);
1466 if (tcp_child_process(sk, nsk, skb)) {
1467 rsk = nsk;
1468 goto reset;
1469 }
1470 return 0;
1471 }
1472 } else
1473 sock_rps_save_rxhash(sk, skb);
1474
1475 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1476 rsk = sk;
1477 goto reset;
1478 }
1479 return 0;
1480
1481 reset:
1482 tcp_v4_send_reset(rsk, skb);
1483 discard:
1484 kfree_skb(skb);
1485 /* Be careful here. If this function gets more complicated and
1486 * gcc suffers from register pressure on the x86, sk (in %ebx)
1487 * might be destroyed here. This current version compiles correctly,
1488 * but you have been warned.
1489 */
1490 return 0;
1491
1492 csum_err:
1493 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1494 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1495 goto discard;
1496 }
1497 EXPORT_SYMBOL(tcp_v4_do_rcv);
1498
1499 void tcp_v4_early_demux(struct sk_buff *skb)
1500 {
1501 const struct iphdr *iph;
1502 const struct tcphdr *th;
1503 struct sock *sk;
1504
1505 if (skb->pkt_type != PACKET_HOST)
1506 return;
1507
1508 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1509 return;
1510
1511 iph = ip_hdr(skb);
1512 th = tcp_hdr(skb);
1513
1514 if (th->doff < sizeof(struct tcphdr) / 4)
1515 return;
1516
1517 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1518 iph->saddr, th->source,
1519 iph->daddr, ntohs(th->dest),
1520 skb->skb_iif);
1521 if (sk) {
1522 skb->sk = sk;
1523 skb->destructor = sock_edemux;
1524 if (sk->sk_state != TCP_TIME_WAIT) {
1525 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1526
1527 if (dst)
1528 dst = dst_check(dst, 0);
1529 if (dst &&
1530 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1531 skb_dst_set_noref(skb, dst);
1532 }
1533 }
1534 }
1535
1536 /* Packet is added to VJ-style prequeue for processing in process
1537 * context, if a reader task is waiting. Apparently, this exciting
1538 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1539 * failed somewhere. Latency? Burstiness? Well, at least now we will
1540 * see why it failed. 8)8) --ANK
1541 *
1542 */
1543 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1544 {
1545 struct tcp_sock *tp = tcp_sk(sk);
1546
1547 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1548 return false;
1549
1550 if (skb->len <= tcp_hdrlen(skb) &&
1551 skb_queue_len(&tp->ucopy.prequeue) == 0)
1552 return false;
1553
1554 /* Before escaping RCU protected region, we need to take care of skb
1555 * dst. Prequeue is only enabled for established sockets.
1556 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1557 * Instead of doing full sk_rx_dst validity here, let's perform
1558 * an optimistic check.
1559 */
1560 if (likely(sk->sk_rx_dst))
1561 skb_dst_drop(skb);
1562 else
1563 skb_dst_force_safe(skb);
1564
1565 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1566 tp->ucopy.memory += skb->truesize;
1567 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1568 struct sk_buff *skb1;
1569
1570 BUG_ON(sock_owned_by_user(sk));
1571
1572 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1573 sk_backlog_rcv(sk, skb1);
1574 NET_INC_STATS_BH(sock_net(sk),
1575 LINUX_MIB_TCPPREQUEUEDROPPED);
1576 }
1577
1578 tp->ucopy.memory = 0;
1579 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1580 wake_up_interruptible_sync_poll(sk_sleep(sk),
1581 POLLIN | POLLRDNORM | POLLRDBAND);
1582 if (!inet_csk_ack_scheduled(sk))
1583 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1584 (3 * tcp_rto_min(sk)) / 4,
1585 TCP_RTO_MAX);
1586 }
1587 return true;
1588 }
1589 EXPORT_SYMBOL(tcp_prequeue);
1590
1591 /*
1592 * From tcp_input.c
1593 */
1594
1595 int tcp_v4_rcv(struct sk_buff *skb)
1596 {
1597 const struct iphdr *iph;
1598 const struct tcphdr *th;
1599 struct sock *sk;
1600 int ret;
1601 struct net *net = dev_net(skb->dev);
1602
1603 if (skb->pkt_type != PACKET_HOST)
1604 goto discard_it;
1605
1606 /* Count it even if it's bad */
1607 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1608
1609 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1610 goto discard_it;
1611
1612 th = tcp_hdr(skb);
1613
1614 if (th->doff < sizeof(struct tcphdr) / 4)
1615 goto bad_packet;
1616 if (!pskb_may_pull(skb, th->doff * 4))
1617 goto discard_it;
1618
1619 /* An explanation is required here, I think.
1620 * Packet length and doff are validated by header prediction,
1621 * provided the case of th->doff == 0 is eliminated.
1622 * So, we defer the checks. */
1623
1624 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1625 goto csum_error;
1626
1627 th = tcp_hdr(skb);
1628 iph = ip_hdr(skb);
1629 /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
1630 * barrier() makes sure the compiler won't play fool^Waliasing games.
1631 */
1632 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1633 sizeof(struct inet_skb_parm));
1634 barrier();
1635
1636 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1637 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1638 skb->len - th->doff * 4);
1639 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1640 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1641 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1642 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1643 TCP_SKB_CB(skb)->sacked = 0;
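	/* Sequence-space accounting note: end_seq above counts the SYN and
	 * FIN flags as one unit each on top of the payload, so a bare SYN
	 * has end_seq == seq + 1.
	 */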
1644
1645 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1646 if (!sk)
1647 goto no_tcp_socket;
1648
1649 process:
1650 if (sk->sk_state == TCP_TIME_WAIT)
1651 goto do_time_wait;
1652
1653 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1654 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1655 goto discard_and_relse;
1656 }
1657
1658 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1659 goto discard_and_relse;
1660
1661 #ifdef CONFIG_TCP_MD5SIG
1662 /*
1663 * We really want to reject the packet as early as possible
1664 * if:
1665 * o We're expecting an MD5'd packet and there is no MD5 tcp option
1666 * o There is an MD5 option and we're not expecting one
1667 */
1668 if (tcp_v4_inbound_md5_hash(sk, skb))
1669 goto discard_and_relse;
1670 #endif
1671
1672 nf_reset(skb);
1673
1674 if (sk_filter(sk, skb))
1675 goto discard_and_relse;
1676
1677 sk_mark_napi_id(sk, skb);
1678 skb->dev = NULL;
1679
1680 bh_lock_sock_nested(sk);
1681 ret = 0;
1682 if (!sock_owned_by_user(sk)) {
1683 if (!tcp_prequeue(sk, skb))
1684 ret = tcp_v4_do_rcv(sk, skb);
1685 } else if (unlikely(sk_add_backlog(sk, skb,
1686 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1687 bh_unlock_sock(sk);
1688 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1689 goto discard_and_relse;
1690 }
1691 bh_unlock_sock(sk);
1692
1693 sock_put(sk);
1694
1695 return ret;
1696
1697 no_tcp_socket:
1698 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1699 goto discard_it;
1700
1701 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1702 csum_error:
1703 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1704 bad_packet:
1705 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1706 } else {
1707 tcp_v4_send_reset(NULL, skb);
1708 }
1709
1710 discard_it:
1711 /* Discard frame. */
1712 kfree_skb(skb);
1713 return 0;
1714
1715 discard_and_relse:
1716 sock_put(sk);
1717 goto discard_it;
1718
1719 do_time_wait:
1720 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1721 inet_twsk_put(inet_twsk(sk));
1722 goto discard_it;
1723 }
1724
1725 if (skb->len < (th->doff << 2)) {
1726 inet_twsk_put(inet_twsk(sk));
1727 goto bad_packet;
1728 }
1729 if (tcp_checksum_complete(skb)) {
1730 inet_twsk_put(inet_twsk(sk));
1731 goto csum_error;
1732 }
1733 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1734 case TCP_TW_SYN: {
1735 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1736 &tcp_hashinfo,
1737 iph->saddr, th->source,
1738 iph->daddr, th->dest,
1739 inet_iif(skb));
1740 if (sk2) {
1741 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1742 inet_twsk_put(inet_twsk(sk));
1743 sk = sk2;
1744 goto process;
1745 }
1746 /* Fall through to ACK */
1747 }
1748 case TCP_TW_ACK:
1749 tcp_v4_timewait_ack(sk, skb);
1750 break;
1751 case TCP_TW_RST:
1752 goto no_tcp_socket;
1753 case TCP_TW_SUCCESS:;
1754 }
1755 goto discard_it;
1756 }
1757
1758 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1759 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1760 .twsk_unique = tcp_twsk_unique,
1761 .twsk_destructor= tcp_twsk_destructor,
1762 };
1763
1764 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1765 {
1766 struct dst_entry *dst = skb_dst(skb);
1767
1768 if (dst && dst_hold_safe(dst)) {
1769 sk->sk_rx_dst = dst;
1770 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1771 }
1772 }
1773 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1774
1775 const struct inet_connection_sock_af_ops ipv4_specific = {
1776 .queue_xmit = ip_queue_xmit,
1777 .send_check = tcp_v4_send_check,
1778 .rebuild_header = inet_sk_rebuild_header,
1779 .sk_rx_dst_set = inet_sk_rx_dst_set,
1780 .conn_request = tcp_v4_conn_request,
1781 .syn_recv_sock = tcp_v4_syn_recv_sock,
1782 .net_header_len = sizeof(struct iphdr),
1783 .setsockopt = ip_setsockopt,
1784 .getsockopt = ip_getsockopt,
1785 .addr2sockaddr = inet_csk_addr2sockaddr,
1786 .sockaddr_len = sizeof(struct sockaddr_in),
1787 .bind_conflict = inet_csk_bind_conflict,
1788 #ifdef CONFIG_COMPAT
1789 .compat_setsockopt = compat_ip_setsockopt,
1790 .compat_getsockopt = compat_ip_getsockopt,
1791 #endif
1792 .mtu_reduced = tcp_v4_mtu_reduced,
1793 };
1794 EXPORT_SYMBOL(ipv4_specific);
1795
1796 #ifdef CONFIG_TCP_MD5SIG
1797 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1798 .md5_lookup = tcp_v4_md5_lookup,
1799 .calc_md5_hash = tcp_v4_md5_hash_skb,
1800 .md5_parse = tcp_v4_parse_md5_keys,
1801 };
1802 #endif
1803
1804 /* NOTE: A lot of things are set to zero explicitly by the call to
1805 * sk_alloc(), so they need not be done here.
1806 */
1807 static int tcp_v4_init_sock(struct sock *sk)
1808 {
1809 struct inet_connection_sock *icsk = inet_csk(sk);
1810
1811 tcp_init_sock(sk);
1812
1813 icsk->icsk_af_ops = &ipv4_specific;
1814
1815 #ifdef CONFIG_TCP_MD5SIG
1816 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1817 #endif
1818
1819 return 0;
1820 }
1821
1822 void tcp_v4_destroy_sock(struct sock *sk)
1823 {
1824 struct tcp_sock *tp = tcp_sk(sk);
1825
1826 tcp_clear_xmit_timers(sk);
1827
1828 tcp_cleanup_congestion_control(sk);
1829
1830 /* Clean up the write buffer. */
1831 tcp_write_queue_purge(sk);
1832
1833 /* Cleans up our, hopefully empty, out_of_order_queue. */
1834 __skb_queue_purge(&tp->out_of_order_queue);
1835
1836 #ifdef CONFIG_TCP_MD5SIG
1837 /* Clean up the MD5 key list, if any */
1838 if (tp->md5sig_info) {
1839 tcp_clear_md5_list(sk);
1840 kfree_rcu(tp->md5sig_info, rcu);
1841 tp->md5sig_info = NULL;
1842 }
1843 #endif
1844
1845 /* Clean up the prequeue; it really must be empty. */
1846 __skb_queue_purge(&tp->ucopy.prequeue);
1847
1848 /* Clean up a referenced TCP bind bucket. */
1849 if (inet_csk(sk)->icsk_bind_hash)
1850 inet_put_port(sk);
1851
1852 BUG_ON(tp->fastopen_rsk != NULL);
1853
1854 /* If socket is aborted during connect operation */
1855 tcp_free_fastopen_req(tp);
1856
1857 sk_sockets_allocated_dec(sk);
1858 sock_release_memcg(sk);
1859 }
1860 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1861
1862 #ifdef CONFIG_PROC_FS
1863 /* Proc filesystem TCP sock list dumping. */
1864
1865 /*
1866  * Get the next listener socket following cur.  If cur is NULL, get the
1867  * first socket starting from the bucket given in st->bucket; when
1868  * st->bucket is zero the very first socket in the hash table is returned.
1869  */
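/*
 * Note that besides the listener sockets themselves, the walk also
 * descends into each listener's SYN table (TCP_SEQ_STATE_OPENREQ), so
 * embryonic request socks appear in the dump as well.
 */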
1870 static void *listening_get_next(struct seq_file *seq, void *cur)
1871 {
1872 struct inet_connection_sock *icsk;
1873 struct hlist_nulls_node *node;
1874 struct sock *sk = cur;
1875 struct inet_listen_hashbucket *ilb;
1876 struct tcp_iter_state *st = seq->private;
1877 struct net *net = seq_file_net(seq);
1878
1879 if (!sk) {
1880 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1881 spin_lock_bh(&ilb->lock);
1882 sk = sk_nulls_head(&ilb->head);
1883 st->offset = 0;
1884 goto get_sk;
1885 }
1886 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1887 ++st->num;
1888 ++st->offset;
1889
1890 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1891 struct request_sock *req = cur;
1892
1893 icsk = inet_csk(st->syn_wait_sk);
1894 req = req->dl_next;
1895 while (1) {
1896 while (req) {
1897 if (req->rsk_ops->family == st->family) {
1898 cur = req;
1899 goto out;
1900 }
1901 req = req->dl_next;
1902 }
1903 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1904 break;
1905 get_req:
1906 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
1907 }
1908 sk = sk_nulls_next(st->syn_wait_sk);
1909 st->state = TCP_SEQ_STATE_LISTENING;
1910 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1911 } else {
1912 icsk = inet_csk(sk);
1913 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1914 if (reqsk_queue_len(&icsk->icsk_accept_queue))
1915 goto start_req;
1916 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1917 sk = sk_nulls_next(sk);
1918 }
1919 get_sk:
1920 sk_nulls_for_each_from(sk, node) {
1921 if (!net_eq(sock_net(sk), net))
1922 continue;
1923 if (sk->sk_family == st->family) {
1924 cur = sk;
1925 goto out;
1926 }
1927 icsk = inet_csk(sk);
1928 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1929 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
1930 start_req:
1931 st->uid = sock_i_uid(sk);
1932 st->syn_wait_sk = sk;
1933 st->state = TCP_SEQ_STATE_OPENREQ;
1934 st->sbucket = 0;
1935 goto get_req;
1936 }
1937 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
1938 }
1939 spin_unlock_bh(&ilb->lock);
1940 st->offset = 0;
1941 if (++st->bucket < INET_LHTABLE_SIZE) {
1942 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1943 spin_lock_bh(&ilb->lock);
1944 sk = sk_nulls_head(&ilb->head);
1945 goto get_sk;
1946 }
1947 cur = NULL;
1948 out:
1949 return cur;
1950 }
1951
1952 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1953 {
1954 struct tcp_iter_state *st = seq->private;
1955 void *rc;
1956
1957 st->bucket = 0;
1958 st->offset = 0;
1959 rc = listening_get_next(seq, NULL);
1960
1961 while (rc && *pos) {
1962 rc = listening_get_next(seq, rc);
1963 --*pos;
1964 }
1965 return rc;
1966 }
1967
1968 static inline bool empty_bucket(const struct tcp_iter_state *st)
1969 {
1970 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1971 }
1972
1973 /*
1974  * Get the first established socket, starting from the bucket given in
1975  * st->bucket.  If st->bucket is zero, the very first socket in the hash is returned.
1976  */
1977 static void *established_get_first(struct seq_file *seq)
1978 {
1979 struct tcp_iter_state *st = seq->private;
1980 struct net *net = seq_file_net(seq);
1981 void *rc = NULL;
1982
1983 st->offset = 0;
1984 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1985 struct sock *sk;
1986 struct hlist_nulls_node *node;
1987 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1988
1989 /* Lockless fast path for the common case of empty buckets */
1990 if (empty_bucket(st))
1991 continue;
1992
1993 spin_lock_bh(lock);
1994 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1995 if (sk->sk_family != st->family ||
1996 !net_eq(sock_net(sk), net)) {
1997 continue;
1998 }
1999 rc = sk;
2000 goto out;
2001 }
2002 spin_unlock_bh(lock);
2003 }
2004 out:
2005 return rc;
2006 }
2007
2008 static void *established_get_next(struct seq_file *seq, void *cur)
2009 {
2010 struct sock *sk = cur;
2011 struct hlist_nulls_node *node;
2012 struct tcp_iter_state *st = seq->private;
2013 struct net *net = seq_file_net(seq);
2014
2015 ++st->num;
2016 ++st->offset;
2017
2018 sk = sk_nulls_next(sk);
2019
2020 sk_nulls_for_each_from(sk, node) {
2021 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2022 return sk;
2023 }
2024
2025 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2026 ++st->bucket;
2027 return established_get_first(seq);
2028 }
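/*
 * Locking discipline for the established walk: established_get_first()
 * returns with the ehash bucket lock held; established_get_next() keeps
 * it while staying in the same bucket and releases it before advancing
 * to the next one, and tcp_seq_stop() releases it if iteration ends
 * mid-bucket.
 */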
2029
2030 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2031 {
2032 struct tcp_iter_state *st = seq->private;
2033 void *rc;
2034
2035 st->bucket = 0;
2036 rc = established_get_first(seq);
2037
2038 while (rc && pos) {
2039 rc = established_get_next(seq, rc);
2040 --pos;
2041 }
2042 return rc;
2043 }
2044
2045 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2046 {
2047 void *rc;
2048 struct tcp_iter_state *st = seq->private;
2049
2050 st->state = TCP_SEQ_STATE_LISTENING;
2051 rc = listening_get_idx(seq, &pos);
2052
2053 if (!rc) {
2054 st->state = TCP_SEQ_STATE_ESTABLISHED;
2055 rc = established_get_idx(seq, pos);
2056 }
2057
2058 return rc;
2059 }
2060
2061 static void *tcp_seek_last_pos(struct seq_file *seq)
2062 {
2063 struct tcp_iter_state *st = seq->private;
2064 int offset = st->offset;
2065 int orig_num = st->num;
2066 void *rc = NULL;
2067
2068 switch (st->state) {
2069 case TCP_SEQ_STATE_OPENREQ:
2070 case TCP_SEQ_STATE_LISTENING:
2071 if (st->bucket >= INET_LHTABLE_SIZE)
2072 break;
2073 st->state = TCP_SEQ_STATE_LISTENING;
2074 rc = listening_get_next(seq, NULL);
2075 while (offset-- && rc)
2076 rc = listening_get_next(seq, rc);
2077 if (rc)
2078 break;
2079 st->bucket = 0;
2080 st->state = TCP_SEQ_STATE_ESTABLISHED;
2081 /* Fallthrough */
2082 case TCP_SEQ_STATE_ESTABLISHED:
2083 if (st->bucket > tcp_hashinfo.ehash_mask)
2084 break;
2085 rc = established_get_first(seq);
2086 while (offset-- && rc)
2087 rc = established_get_next(seq, rc);
2088 }
2089
2090 st->num = orig_num;
2091
2092 return rc;
2093 }
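/*
 * tcp_seek_last_pos() lets tcp_seq_start() resume from the remembered
 * bucket/offset when userspace reads the file sequentially, instead of
 * rescanning from the beginning on every read; st->num is restored so
 * the entry numbers printed by the get_*_sock() helpers stay monotonic.
 */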
2094
2095 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2096 {
2097 struct tcp_iter_state *st = seq->private;
2098 void *rc;
2099
2100 if (*pos && *pos == st->last_pos) {
2101 rc = tcp_seek_last_pos(seq);
2102 if (rc)
2103 goto out;
2104 }
2105
2106 st->state = TCP_SEQ_STATE_LISTENING;
2107 st->num = 0;
2108 st->bucket = 0;
2109 st->offset = 0;
2110 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2111
2112 out:
2113 st->last_pos = *pos;
2114 return rc;
2115 }
2116
2117 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2118 {
2119 struct tcp_iter_state *st = seq->private;
2120 void *rc = NULL;
2121
2122 if (v == SEQ_START_TOKEN) {
2123 rc = tcp_get_idx(seq, 0);
2124 goto out;
2125 }
2126
2127 switch (st->state) {
2128 case TCP_SEQ_STATE_OPENREQ:
2129 case TCP_SEQ_STATE_LISTENING:
2130 rc = listening_get_next(seq, v);
2131 if (!rc) {
2132 st->state = TCP_SEQ_STATE_ESTABLISHED;
2133 st->bucket = 0;
2134 st->offset = 0;
2135 rc = established_get_first(seq);
2136 }
2137 break;
2138 case TCP_SEQ_STATE_ESTABLISHED:
2139 rc = established_get_next(seq, v);
2140 break;
2141 }
2142 out:
2143 ++*pos;
2144 st->last_pos = *pos;
2145 return rc;
2146 }
2147
2148 static void tcp_seq_stop(struct seq_file *seq, void *v)
2149 {
2150 struct tcp_iter_state *st = seq->private;
2151
2152 switch (st->state) {
2153 case TCP_SEQ_STATE_OPENREQ:
2154 if (v) {
2155 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2156 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2157 }
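		/* fall through: the listening bucket lock is held as well */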
2158 case TCP_SEQ_STATE_LISTENING:
2159 if (v != SEQ_START_TOKEN)
2160 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2161 break;
2162 case TCP_SEQ_STATE_ESTABLISHED:
2163 if (v)
2164 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2165 break;
2166 }
2167 }
2168
2169 int tcp_seq_open(struct inode *inode, struct file *file)
2170 {
2171 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2172 struct tcp_iter_state *s;
2173 int err;
2174
2175 err = seq_open_net(inode, file, &afinfo->seq_ops,
2176 sizeof(struct tcp_iter_state));
2177 if (err < 0)
2178 return err;
2179
2180 s = ((struct seq_file *)file->private_data)->private;
2181 s->family = afinfo->family;
2182 s->last_pos = 0;
2183 return 0;
2184 }
2185 EXPORT_SYMBOL(tcp_seq_open);
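/*
 * The family recorded in the iterator state is what lets the same
 * seq_ops back both /proc/net/tcp and /proc/net/tcp6: the get-next
 * helpers above skip any socket whose sk_family does not match it.
 */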
2186
2187 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2188 {
2189 int rc = 0;
2190 struct proc_dir_entry *p;
2191
2192 afinfo->seq_ops.start = tcp_seq_start;
2193 afinfo->seq_ops.next = tcp_seq_next;
2194 afinfo->seq_ops.stop = tcp_seq_stop;
2195
2196 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2197 afinfo->seq_fops, afinfo);
2198 if (!p)
2199 rc = -ENOMEM;
2200 return rc;
2201 }
2202 EXPORT_SYMBOL(tcp_proc_register);
2203
2204 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2205 {
2206 remove_proc_entry(afinfo->name, net->proc_net);
2207 }
2208 EXPORT_SYMBOL(tcp_proc_unregister);
2209
2210 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2211 struct seq_file *f, int i, kuid_t uid)
2212 {
2213 const struct inet_request_sock *ireq = inet_rsk(req);
2214 long delta = req->expires - jiffies;
2215
2216 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2217 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2218 i,
2219 ireq->ir_loc_addr,
2220 ntohs(inet_sk(sk)->inet_sport),
2221 ireq->ir_rmt_addr,
2222 ntohs(ireq->ir_rmt_port),
2223 TCP_SYN_RECV,
2224 0, 0, /* could print option size, but that is af dependent. */
2225 1, /* timers active (only the expire timer) */
2226 jiffies_delta_to_clock_t(delta),
2227 req->num_timeout,
2228 from_kuid_munged(seq_user_ns(f), uid),
2229 0, /* non standard timer */
2230 0, /* open_requests have no inode */
2231 atomic_read(&sk->sk_refcnt),
2232 req);
2233 }
2234
2235 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2236 {
2237 int timer_active;
2238 unsigned long timer_expires;
2239 const struct tcp_sock *tp = tcp_sk(sk);
2240 const struct inet_connection_sock *icsk = inet_csk(sk);
2241 const struct inet_sock *inet = inet_sk(sk);
2242 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2243 __be32 dest = inet->inet_daddr;
2244 __be32 src = inet->inet_rcv_saddr;
2245 __u16 destp = ntohs(inet->inet_dport);
2246 __u16 srcp = ntohs(inet->inet_sport);
2247 int rx_queue;
2248
2249 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2250 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2251 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2252 timer_active = 1;
2253 timer_expires = icsk->icsk_timeout;
2254 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2255 timer_active = 4;
2256 timer_expires = icsk->icsk_timeout;
2257 } else if (timer_pending(&sk->sk_timer)) {
2258 timer_active = 2;
2259 timer_expires = sk->sk_timer.expires;
2260 } else {
2261 timer_active = 0;
2262 timer_expires = jiffies;
2263 }
2264
2265 if (sk->sk_state == TCP_LISTEN)
2266 rx_queue = sk->sk_ack_backlog;
2267 else
2268 		/*
2269 		 * Because we don't lock the socket, we might find a transient negative value.
2270 		 */
2271 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2272
2273 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2274 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2275 i, src, srcp, dest, destp, sk->sk_state,
2276 tp->write_seq - tp->snd_una,
2277 rx_queue,
2278 timer_active,
2279 jiffies_delta_to_clock_t(timer_expires - jiffies),
2280 icsk->icsk_retransmits,
2281 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2282 icsk->icsk_probes_out,
2283 sock_i_ino(sk),
2284 atomic_read(&sk->sk_refcnt), sk,
2285 jiffies_to_clock_t(icsk->icsk_rto),
2286 jiffies_to_clock_t(icsk->icsk_ack.ato),
2287 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2288 tp->snd_cwnd,
2289 sk->sk_state == TCP_LISTEN ?
2290 (fastopenq ? fastopenq->max_qlen : 0) :
2291 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2292 }
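/*
 * The fields past "inode" in the line printed above (refcount, socket
 * pointer, rto, ato, quickack/pingpong flags, snd_cwnd, and either the
 * slow-start threshold or, for listeners, the fastopen queue limit)
 * have no column headers; see the header line emitted in
 * tcp4_seq_show() below.
 */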
2293
2294 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2295 struct seq_file *f, int i)
2296 {
2297 __be32 dest, src;
2298 __u16 destp, srcp;
2299 s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2300
2301 dest = tw->tw_daddr;
2302 src = tw->tw_rcv_saddr;
2303 destp = ntohs(tw->tw_dport);
2304 srcp = ntohs(tw->tw_sport);
2305
2306 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2307 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2308 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2309 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2310 atomic_read(&tw->tw_refcnt), tw);
2311 }
2312
2313 #define TMPSZ 150
2314
2315 static int tcp4_seq_show(struct seq_file *seq, void *v)
2316 {
2317 struct tcp_iter_state *st;
2318 struct sock *sk = v;
2319
2320 seq_setwidth(seq, TMPSZ - 1);
2321 if (v == SEQ_START_TOKEN) {
2322 seq_puts(seq, " sl local_address rem_address st tx_queue "
2323 "rx_queue tr tm->when retrnsmt uid timeout "
2324 "inode");
2325 goto out;
2326 }
2327 st = seq->private;
2328
2329 switch (st->state) {
2330 case TCP_SEQ_STATE_LISTENING:
2331 case TCP_SEQ_STATE_ESTABLISHED:
2332 if (sk->sk_state == TCP_TIME_WAIT)
2333 get_timewait4_sock(v, seq, st->num);
2334 else
2335 get_tcp4_sock(v, seq, st->num);
2336 break;
2337 case TCP_SEQ_STATE_OPENREQ:
2338 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2339 break;
2340 }
2341 out:
2342 seq_pad(seq, '\n');
2343 return 0;
2344 }
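/*
 * A purely illustrative /proc/net/tcp entry as produced above, for a
 * hypothetical listener on 127.0.0.1:8080 (addresses are little-endian
 * hex, state 0A == TCP_LISTEN; all values are made up):
 *
 *    0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000        0 12345 1 ffff88003c600000 100 0 0 10 0
 */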
2345
2346 static const struct file_operations tcp_afinfo_seq_fops = {
2347 .owner = THIS_MODULE,
2348 .open = tcp_seq_open,
2349 .read = seq_read,
2350 .llseek = seq_lseek,
2351 .release = seq_release_net
2352 };
2353
2354 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2355 .name = "tcp",
2356 .family = AF_INET,
2357 .seq_fops = &tcp_afinfo_seq_fops,
2358 .seq_ops = {
2359 .show = tcp4_seq_show,
2360 },
2361 };
2362
2363 static int __net_init tcp4_proc_init_net(struct net *net)
2364 {
2365 return tcp_proc_register(net, &tcp4_seq_afinfo);
2366 }
2367
2368 static void __net_exit tcp4_proc_exit_net(struct net *net)
2369 {
2370 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2371 }
2372
2373 static struct pernet_operations tcp4_net_ops = {
2374 .init = tcp4_proc_init_net,
2375 .exit = tcp4_proc_exit_net,
2376 };
2377
2378 int __init tcp4_proc_init(void)
2379 {
2380 return register_pernet_subsys(&tcp4_net_ops);
2381 }
2382
2383 void tcp4_proc_exit(void)
2384 {
2385 unregister_pernet_subsys(&tcp4_net_ops);
2386 }
2387 #endif /* CONFIG_PROC_FS */
2388
2389 struct proto tcp_prot = {
2390 .name = "TCP",
2391 .owner = THIS_MODULE,
2392 .close = tcp_close,
2393 .connect = tcp_v4_connect,
2394 .disconnect = tcp_disconnect,
2395 .accept = inet_csk_accept,
2396 .ioctl = tcp_ioctl,
2397 .init = tcp_v4_init_sock,
2398 .destroy = tcp_v4_destroy_sock,
2399 .shutdown = tcp_shutdown,
2400 .setsockopt = tcp_setsockopt,
2401 .getsockopt = tcp_getsockopt,
2402 .recvmsg = tcp_recvmsg,
2403 .sendmsg = tcp_sendmsg,
2404 .sendpage = tcp_sendpage,
2405 .backlog_rcv = tcp_v4_do_rcv,
2406 .release_cb = tcp_release_cb,
2407 .hash = inet_hash,
2408 .unhash = inet_unhash,
2409 .get_port = inet_csk_get_port,
2410 .enter_memory_pressure = tcp_enter_memory_pressure,
2411 .stream_memory_free = tcp_stream_memory_free,
2412 .sockets_allocated = &tcp_sockets_allocated,
2413 .orphan_count = &tcp_orphan_count,
2414 .memory_allocated = &tcp_memory_allocated,
2415 .memory_pressure = &tcp_memory_pressure,
2416 .sysctl_mem = sysctl_tcp_mem,
2417 .sysctl_wmem = sysctl_tcp_wmem,
2418 .sysctl_rmem = sysctl_tcp_rmem,
2419 .max_header = MAX_TCP_HEADER,
2420 .obj_size = sizeof(struct tcp_sock),
2421 .slab_flags = SLAB_DESTROY_BY_RCU,
2422 .twsk_prot = &tcp_timewait_sock_ops,
2423 .rsk_prot = &tcp_request_sock_ops,
2424 .h.hashinfo = &tcp_hashinfo,
2425 .no_autobind = true,
2426 #ifdef CONFIG_COMPAT
2427 .compat_setsockopt = compat_tcp_setsockopt,
2428 .compat_getsockopt = compat_tcp_getsockopt,
2429 #endif
2430 #ifdef CONFIG_MEMCG_KMEM
2431 .init_cgroup = tcp_init_cgroup,
2432 .destroy_cgroup = tcp_destroy_cgroup,
2433 .proto_cgroup = tcp_proto_cgroup,
2434 #endif
2435 .diag_destroy = tcp_abort,
2436 };
2437 EXPORT_SYMBOL(tcp_prot);
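/*
 * tcp_prot is hooked into the socket layer elsewhere (af_inet.c calls
 * proto_register() and wires it to the SOCK_STREAM/IPPROTO_TCP protosw
 * entry); nothing in this file registers it directly.
 */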
2438
2439 static void __net_exit tcp_sk_exit(struct net *net)
2440 {
2441 int cpu;
2442
2443 for_each_possible_cpu(cpu)
2444 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2445 free_percpu(net->ipv4.tcp_sk);
2446 }
2447
2448 static int __net_init tcp_sk_init(struct net *net)
2449 {
2450 int res, cpu;
2451
2452 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2453 if (!net->ipv4.tcp_sk)
2454 return -ENOMEM;
2455
2456 for_each_possible_cpu(cpu) {
2457 struct sock *sk;
2458
2459 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2460 IPPROTO_TCP, net);
2461 if (res)
2462 goto fail;
2463 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2464 }
2465 net->ipv4.sysctl_tcp_ecn = 2;
2466 return 0;
2467
2468 fail:
2469 tcp_sk_exit(net);
2470
2471 return res;
2472 }
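/*
 * The per-cpu kernel sockets created above carry no data themselves;
 * they give the stack a per-cpu context for transmitting control
 * segments (e.g. the RSTs and ACKs built by tcp_v4_send_reset() and
 * tcp_v4_send_ack()) without contending on a single shared socket.
 */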
2473
2474 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2475 {
2476 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2477 }
2478
2479 static struct pernet_operations __net_initdata tcp_sk_ops = {
2480 .init = tcp_sk_init,
2481 .exit = tcp_sk_exit,
2482 .exit_batch = tcp_sk_exit_batch,
2483 };
2484
2485 void __init tcp_v4_init(void)
2486 {
2487 inet_hashinfo_init(&tcp_hashinfo);
2488 if (register_pernet_subsys(&tcp_sk_ops))
2489 panic("Failed to create the TCP control socket.\n");
2490 }
2491