// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PF_INET protocol family socket handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *		piggy,
 *		Karl Knutson	:	Socket protocol table
 *		A.N.Kuznetsov	:	Socket death error in accept().
 *		John Richardson :	Fix non blocking error in connect()
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
 *		Alan Cox	:	Keep correct socket pointer on sock
 *					structures when accept()ed
 *		Alan Cox	:	Semantics of SO_LINGER aren't state
 *					moved to close when you look carefully.
 *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Alan Cox,
 *		Tony Gale	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
 *		Alan Cox	:	bind() works correctly for RAW sockets.
 *					Note that FreeBSD at least was broken
 *					in this respect so be careful with
 *					compatibility tests...
 *		Alan Cox	:	routing cache support
 *		Alan Cox	:	memzero the socket structure for
 *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(eg for big web sites), but only if
 *					specifically application requested.
 *		Alan Cox	:	New buffering throughout IP. Used
 *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
 *		Alan Cox	:	BSD rather than common sense
 *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.
 *		Alan Cox	:	Locked down bind (see security list).
 *		Alan Cox	:	Loosened bind a little.
 *		Mike McLagan	:	ADD/DEL DLCI Ioctls
 *		Willy Konynenberg :	Transparent proxying support.
 *		David S. Miller	:	New socket lookup architecture.
 *					Some other random speedups.
 *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
 *		Andi Kleen	:	Fix inet_stream_connect TCP race.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
#include <net/compat.h>
#include <net/rps.h>

#include <trace/events/sock.h>
#include <trace/hooks/net.h>

/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
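
/* Illustrative userspace sketch (not part of this file): how socket()
 * arguments map onto the inetsw lookup performed by inet_create() below.
 * Protocol 0 (IPPROTO_IP) is the wild-card case and resolves to the first
 * matching entry for the socket type:
 *
 *	int tcp_fd  = socket(AF_INET, SOCK_STREAM, 0);  // wild card -> IPPROTO_TCP
 *	int udp_fd  = socket(AF_INET, SOCK_DGRAM, 0);   // wild card -> IPPROTO_UDP
 *	int sctp_fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *		// exact match; a miss triggers request_module("net-pf-2-proto-132-type-1")
 */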

/* New destruction routine */

void inet_sock_destruct(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	__skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim_final(sk);

	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
		pr_err("Attempt to release TCP socket in state %d %p\n",
		       sk->sk_state, sk);
		return;
	}
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive inet socket %p\n", sk);
		return;
	}

	WARN_ON_ONCE(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON_ONCE(sk->sk_wmem_queued);
	WARN_ON_ONCE(sk_forward_alloc_get(sk));

	kfree(rcu_dereference_protected(inet->inet_opt, 1));
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
	dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
}
EXPORT_SYMBOL(inet_sock_destruct);

/*
 *	The routines beyond this point handle the behaviour of an AF_INET
 *	socket object. Mostly it punts to the subprotocols of IP to do
 *	the work.
 */

/*
 *	Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
	struct inet_sock *inet;
	/* We may need to bind the socket. */
	lock_sock(sk);
	inet = inet_sk(sk);
	if (!inet->inet_num) {
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}
	release_sock(sk);
	return 0;
}

int __inet_listen_sk(struct sock *sk, int backlog)
{
	unsigned char old_state = sk->sk_state;
	int err, tcp_fastopen;

	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		return -EINVAL;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != TCP_LISTEN) {
		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
		 * Also, the fastopen backlog may already have been set via
		 * the option because the socket was in TCP_LISTEN state
		 * previously but was shutdown() rather than close()d.
		 */
		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
			fastopen_queue_tune(sk, backlog);
			tcp_fastopen_init_key_once(sock_net(sk));
		}

		err = inet_csk_listen_start(sk);
		if (err)
			return err;

		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
	}
	return 0;
}

/*
 *	Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);

	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	err = __inet_listen_sk(sk, backlog);

out:
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_listen);
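
/* Illustrative userspace sketch (not part of this file): the usual path
 * into inet_listen(), here with server-side TCP Fast Open enabled
 * explicitly via the TCP_FASTOPEN socket option rather than the
 * net.ipv4.tcp_fastopen sysctl path handled above:
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	int qlen = 64;		// fastopen queue length
 *
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *	listen(fd, 128);	// -> inet_listen() -> __inet_listen_sk()
 */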

/*
 *	Create an inet socket.
 */

static int inet_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk = NULL;
	struct inet_protosw *answer;
	struct inet_sock *inet;
	struct proto *answer_prot;
	unsigned char answer_flags;
	int try_loading_module = 0;
	int err;

	if (protocol < 0 || protocol >= IPPROTO_MAX)
		return -EINVAL;

	sock->state = SS_UNCONNECTED;

	/* Look for the requested type/protocol pair. */
lookup_protocol:
	err = -ESOCKTNOSUPPORT;
	rcu_read_lock();
	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {

		err = 0;
		/* Check the non-wild match. */
		if (protocol == answer->protocol) {
			if (protocol != IPPROTO_IP)
				break;
		} else {
			/* Check for the two wild cases. */
			if (IPPROTO_IP == protocol) {
				protocol = answer->protocol;
				break;
			}
			if (IPPROTO_IP == answer->protocol)
				break;
		}
		err = -EPROTONOSUPPORT;
	}

	if (unlikely(err)) {
		if (try_loading_module < 2) {
			rcu_read_unlock();
			/*
			 * Be more specific, e.g. net-pf-2-proto-132-type-1
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
			 */
			if (++try_loading_module == 1)
				request_module("net-pf-%d-proto-%d-type-%d",
					       PF_INET, protocol, sock->type);
			/*
			 * Fall back to generic, e.g. net-pf-2-proto-132
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
			 */
			else
				request_module("net-pf-%d-proto-%d",
					       PF_INET, protocol);
			goto lookup_protocol;
		} else
			goto out_rcu_unlock;
	}

	err = -EPERM;
	if (sock->type == SOCK_RAW && !kern &&
	    !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out_rcu_unlock;

	sock->ops = answer->ops;
	answer_prot = answer->prot;
	answer_flags = answer->flags;
	rcu_read_unlock();

	WARN_ON(!answer_prot->slab);

	err = -ENOMEM;
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
	if (!sk)
		goto out;

	err = 0;
	if (INET_PROTOSW_REUSE & answer_flags)
		sk->sk_reuse = SK_CAN_REUSE;

	if (INET_PROTOSW_ICSK & answer_flags)
		inet_init_csk_locks(sk);

	inet = inet_sk(sk);
	inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);

	inet_clear_bit(NODEFRAG, sk);

	if (SOCK_RAW == sock->type) {
		inet->inet_num = protocol;
		if (IPPROTO_RAW == protocol)
			inet_set_bit(HDRINCL, sk);
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
		inet->pmtudisc = IP_PMTUDISC_DONT;
	else
		inet->pmtudisc = IP_PMTUDISC_WANT;

	atomic_set(&inet->inet_id, 0);

	sock_init_data(sock, sk);

	sk->sk_destruct	   = inet_sock_destruct;
	sk->sk_protocol	   = protocol;
	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sk->sk_txrehash	   = READ_ONCE(net->core.sysctl_txrehash);

	inet->uc_ttl	= -1;
	inet_set_bit(MC_LOOP, sk);
	inet->mc_ttl	= 1;
	inet_set_bit(MC_ALL, sk);
	inet->mc_index	= 0;
	inet->mc_list	= NULL;
	inet->rcv_tos	= 0;

	if (inet->inet_num) {
		/* It is assumed that any protocol which allows the user
		 * to assign a number at socket creation time automatically
		 * shares that number.
		 */
		inet->inet_sport = htons(inet->inet_num);
		/* Add to protocol hash chains. */
		err = sk->sk_prot->hash(sk);
		if (err)
			goto out_sk_release;
	}

	if (sk->sk_prot->init) {
		err = sk->sk_prot->init(sk);
		if (err)
			goto out_sk_release;
	}

	if (!kern) {
		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
		if (err)
			goto out_sk_release;
	}

	trace_android_rvh_inet_sock_create(sk);

out:
	trace_android_vh_inet_create(sk, err);
	return err;
out_rcu_unlock:
	rcu_read_unlock();
	goto out;
out_sk_release:
	sk_common_release(sk);
	sock->sk = NULL;
	goto out;
}


/*
 *	The peer socket should always be NULL (or else). When we call this
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */
int inet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		long timeout;

		if (!sk->sk_kern_sock)
			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);

		trace_android_rvh_inet_sock_release(sk);

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);

		/* If linger is set, we don't return until the close
		 * is complete.  Otherwise we return immediately. The
		 * actual closing is done the same either way.
		 *
		 * If the close is due to the process exiting, we never
		 * linger..
		 */
		timeout = 0;
		if (sock_flag(sk, SOCK_LINGER) &&
		    !(current->flags & PF_EXITING))
			timeout = sk->sk_lingertime;
		sk->sk_prot->close(sk, timeout);
		sock->sk = NULL;
	}
	return 0;
}
EXPORT_SYMBOL(inet_release);
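
/* Illustrative userspace sketch (not part of this file): making close()
 * block until the protocol-level close completes (or the timeout expires),
 * which is the sock_flag(sk, SOCK_LINGER) path above:
 *
 *	struct linger lo = { .l_onoff = 1, .l_linger = 5 };	// 5 seconds
 *
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lo, sizeof(lo));
 *	close(fd);	// -> inet_release() with a non-zero timeout
 */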

int inet_bind_sk(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	u32 flags = BIND_WITH_LOCK;
	int err;

	/* If the socket has its own bind function then use it. (RAW) */
	if (sk->sk_prot->bind)
		return sk->sk_prot->bind(sk, uaddr, addr_len);

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	/* BPF prog is run before any checks are done so that if the prog
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
						 CGROUP_INET4_BIND, &flags);
	if (err)
		return err;

	return __inet_bind(sk, uaddr, addr_len, flags);
}

int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	return inet_bind_sk(sock->sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_bind);

int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
		u32 flags)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	unsigned short snum;
	int chk_addr_ret;
	u32 tb_id = RT_TABLE_LOCAL;
	int err;

	if (addr->sin_family != AF_INET) {
		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
		 * only if s_addr is INADDR_ANY.
		 */
		err = -EAFNOSUPPORT;
		if (addr->sin_family != AF_UNSPEC ||
		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
			goto out;
	}

	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

	/* Not specified by any standard per se, however it breaks too
	 * many applications when removed. It is unfortunate since
	 * allowing applications to make a non-local bind solves
	 * several problems with systems using dynamic addressing.
	 * (i.e. your servers still start up even if your ISDN link
	 * is temporarily down)
	 */
	err = -EADDRNOTAVAIL;
	if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
					 chk_addr_ret))
		goto out;

	snum = ntohs(addr->sin_port);
	err = -EACCES;
	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
	    snum && inet_port_requires_bind_service(net, snum) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		goto out;

	/* We keep a pair of addresses. rcv_saddr is the one
	 * used by hash lookups, and saddr is used for transmit.
	 *
	 * In the BSD API these are the same except where it
	 * would be illegal to use them (multicast/broadcast) in
	 * which case the sending device address is used.
	 */
	if (flags & BIND_WITH_LOCK)
		lock_sock(sk);

	/* Check these errors (active socket, double bind). */
	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
		goto out_release_sock;

	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	/* Make sure we are allowed to bind here. */
	if (snum || !(inet_test_bit(BIND_ADDRESS_NO_PORT, sk) ||
		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
		err = sk->sk_prot->get_port(sk, snum);
		if (err) {
			inet->inet_saddr = inet->inet_rcv_saddr = 0;
			goto out_release_sock;
		}
		if (!(flags & BIND_FROM_BPF)) {
			err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
			if (err) {
				inet->inet_saddr = inet->inet_rcv_saddr = 0;
				if (sk->sk_prot->put_port)
					sk->sk_prot->put_port(sk);
				goto out_release_sock;
			}
		}
	}

	if (inet->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	inet->inet_sport = htons(inet->inet_num);
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sk_dst_reset(sk);
	err = 0;
out_release_sock:
	if (flags & BIND_WITH_LOCK)
		release_sock(sk);
out:
	return err;
}
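
/* Illustrative userspace sketch (not part of this file): binding an
 * address without reserving a port, so the port is chosen later at
 * connect() time.  This exercises the inet_test_bit(BIND_ADDRESS_NO_PORT)
 * branch above:
 *
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),	// example address
 *		.sin_port = 0,
 *	};
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, IP_BIND_ADDRESS_NO_PORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));	// no get_port() here
 */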

int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	const struct proto *prot;
	int err;

	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);

	if (uaddr->sa_family == AF_UNSPEC)
		return prot->disconnect(sk, flags);

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
		err = prot->pre_connect(sk, uaddr, addr_len);
		if (err)
			return err;
	}

	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
		return -EAGAIN;
	return prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
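
/* Illustrative userspace sketch (not part of this file): a connected UDP
 * socket can be "disconnected" again by connecting to AF_UNSPEC, which is
 * the prot->disconnect() path above:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));	// set peer
 *	connect(fd, &sa, sizeof(sa));				// clear peer
 */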

static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change the state of the socket from TCP_SYN_*.
	 * Connect() does not allow getting error notifications
	 * without closing the socket.
	 */
	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	return timeo;
}

/*
 *	Connect to a remote host. There is regrettably still a little
 *	TCP 'magic' in here.
 */
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			  int addr_len, int flags, int is_sendmsg)
{
	struct sock *sk = sock->sk;
	int err;
	long timeo;

	/*
	 * uaddr can be NULL and addr_len can be 0 if:
	 * sk is a TCP fastopen active socket and
	 * TCP_FASTOPEN_CONNECT sockopt is set and
	 * we already have a valid cookie for this socket.
	 * In this case, user can call write() after connect().
	 * write() will invoke tcp_sendmsg_fastopen() which calls
	 * __inet_stream_connect().
	 */
	if (uaddr) {
		if (addr_len < sizeof(uaddr->sa_family))
			return -EINVAL;

		if (uaddr->sa_family == AF_UNSPEC) {
			sk->sk_disconnects++;
			err = sk->sk_prot->disconnect(sk, flags);
			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
			goto out;
		}
	}

	switch (sock->state) {
	default:
		err = -EINVAL;
		goto out;
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (inet_test_bit(DEFER_CONNECT, sk))
			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
		else
			err = -EALREADY;
		/* Fall out of switch with err, set for this state */
		break;
	case SS_UNCONNECTED:
		err = -EISCONN;
		if (sk->sk_state != TCP_CLOSE)
			goto out;

		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
			if (err)
				goto out;
		}

		err = sk->sk_prot->connect(sk, uaddr, addr_len);
		if (err < 0)
			goto out;

		sock->state = SS_CONNECTING;

		if (!err && inet_test_bit(DEFER_CONNECT, sk))
			goto out;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		err = -EINPROGRESS;
		break;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
				tcp_sk(sk)->fastopen_req &&
				tcp_sk(sk)->fastopen_req->data ? 1 : 0;
		int dis = sk->sk_disconnects;

		/* Error code is set above */
		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
			goto out;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;

		if (dis != sk->sk_disconnects) {
			err = -EPIPE;
			goto out;
		}
	}

	/* Connection was closed by RST, timeout, ICMP error
	 * or another process disconnected us.
	 */
	if (sk->sk_state == TCP_CLOSE)
		goto sock_error;

	/* sk->sk_err may be not zero now, if RECVERR was ordered by user
	 * and error was received after socket entered established state.
	 * Hence, it is handled normally after connect() returns successfully.
	 */

	sock->state = SS_CONNECTED;
	err = 0;
out:
	return err;

sock_error:
	err = sock_error(sk) ? : -ECONNABORTED;
	sock->state = SS_UNCONNECTED;
	sk->sk_disconnects++;
	if (sk->sk_prot->disconnect(sk, flags))
		sock->state = SS_DISCONNECTING;
	goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);
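
/* Illustrative userspace sketch (not part of this file): the classic
 * non-blocking connect pattern served by the SS_CONNECTING handling above.
 * connect() fails with EINPROGRESS immediately; completion (or failure) is
 * then harvested with poll() plus SO_ERROR:
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *		int soerr;
 *		socklen_t len = sizeof(soerr);
 *
 *		poll(&pfd, 1, 5000);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &soerr, &len);
 *		// soerr == 0 means the handshake completed
 *	}
 */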

int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	int err;

	lock_sock(sock->sk);
	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
	release_sock(sock->sk);
	return err;
}
EXPORT_SYMBOL(inet_stream_connect);

void __inet_accept(struct socket *sock, struct socket *newsock, struct sock *newsk)
{
	sock_rps_record_flow(newsk);
	WARN_ON(!((1 << newsk->sk_state) &
		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
		   TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 |
		   TCPF_CLOSING | TCPF_CLOSE_WAIT |
		   TCPF_CLOSE)));

	if (test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		set_bit(SOCK_SUPPORT_ZC, &newsock->flags);
	sock_graft(newsk, newsock);

	newsock->state = SS_CONNECTED;
}

/*
 *	Accept a pending connection. The TCP layer now gives BSD semantics.
 */
int inet_accept(struct socket *sock, struct socket *newsock,
		struct proto_accept_arg *arg)
{
	struct sock *sk1 = sock->sk, *sk2;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	arg->err = -EINVAL;
	sk2 = READ_ONCE(sk1->sk_prot)->accept(sk1, arg);
	if (!sk2)
		return arg->err;

	lock_sock(sk2);
	__inet_accept(sock, newsock, sk2);
	release_sock(sk2);
	return 0;
}
EXPORT_SYMBOL(inet_accept);

/*
 *	This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int peer)
{
	struct sock *sk = sock->sk;
	struct inet_sock *inet = inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);
	int sin_addr_len = sizeof(*sin);

	sin->sin_family = AF_INET;
	lock_sock(sk);
	if (peer) {
		if (!inet->inet_dport ||
		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		     peer == 1)) {
			release_sock(sk);
			return -ENOTCONN;
		}
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
		BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
				       CGROUP_INET4_GETPEERNAME);
	} else {
		__be32 addr = inet->inet_rcv_saddr;

		if (!addr)
			addr = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
		BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin, &sin_addr_len,
				       CGROUP_INET4_GETSOCKNAME);
	}
	release_sock(sk);
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	return sin_addr_len;
}
EXPORT_SYMBOL(inet_getname);

int inet_send_prepare(struct sock *sk)
{
	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL_GPL(inet_send_prepare);

int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return -EAGAIN;

	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
			       sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

void inet_splice_eof(struct socket *sock)
{
	const struct proto *prot;
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return;

	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
	prot = READ_ONCE(sk->sk_prot);
	if (prot->splice_eof)
		prot->splice_eof(sock);
}
EXPORT_SYMBOL_GPL(inet_splice_eof);

INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
					  size_t, int, int *));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	if (likely(!(flags & MSG_ERRQUEUE)))
		sock_rps_record_flow(sk);

	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
			      sk, msg, size, flags, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(inet_recvmsg);

int inet_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	/* This should really check to make sure
	 * the socket is a TCP socket. (WHY AC...)
	 */
	how++;	/* Map SHUT_RD/SHUT_WR/SHUT_RDWR (0/1/2) onto 1/2/3 so that
		 * bit 1 means "shut down receives" and bit 2 means "shut
		 * down sends".
		 */
	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
		return -EINVAL;

	lock_sock(sk);
	if (sock->state == SS_CONNECTING) {
		if ((1 << sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	switch (sk->sk_state) {
	case TCP_CLOSE:
		err = -ENOTCONN;
		/* Hack to wake up other listeners, who can poll for
		 * EPOLLHUP, even on eg. unconnected UDP sockets -- RR
		 */
		fallthrough;
	default:
		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
		if (sk->sk_prot->shutdown)
			sk->sk_prot->shutdown(sk, how);
		break;

	/* Remaining two branches are a temporary solution for missing
	 * close() in multithreaded environments. It is _not_ a good idea,
	 * but we have no choice until close() is repaired at the VFS level.
	 */
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
		break;
	}

	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_shutdown);
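
/* Worked example of the how++ mapping above (illustrative only):
 *
 *	SHUT_RD   = 0  ->  how = 1 = RCV_SHUTDOWN
 *	SHUT_WR   = 1  ->  how = 2 = SEND_SHUTDOWN
 *	SHUT_RDWR = 2  ->  how = 3 = RCV_SHUTDOWN | SEND_SHUTDOWN
 *
 * Anything outside SHUTDOWN_MASK (0x3) is rejected with -EINVAL, and the
 * !how test catches INT_MAX from userspace, which wraps to 0 after the
 * increment (the "MAXINT->0" note).
 */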

/*
 *	ioctl() calls you can issue on an INET socket. Most of these are
 *	device configuration and stuff and very rarely used. Some ioctls
 *	pass on to the socket itself.
 *
 *	NOTE: I like the idea of a module for the config stuff. i.e. ifconfig
 *	loads the devconfigure module, which does its configuring and is then
 *	unloaded. There's a good 20K of config code hanging around the kernel.
 */
int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = 0;
	struct net *net = sock_net(sk);
	void __user *p = (void __user *)arg;
	struct ifreq ifr;
	struct rtentry rt;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
			return -EFAULT;
		err = ip_rt_ioctl(net, cmd, &rt);
		break;
	case SIOCRTMSG:
		err = -EINVAL;
		break;
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
		err = arp_ioctl(net, cmd, (void __user *)arg);
		break;
	case SIOCGIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCGIFPFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		if (!err && put_user_ifreq(&ifr, p))
			err = -EFAULT;
		break;

	case SIOCSIFADDR:
	case SIOCSIFBRDADDR:
	case SIOCSIFNETMASK:
	case SIOCSIFDSTADDR:
	case SIOCSIFPFLAGS:
	case SIOCSIFFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		break;
	default:
		if (sk->sk_prot->ioctl)
			err = sk_ioctl(sk, cmd, (void __user *)arg);
		else
			err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
EXPORT_SYMBOL(inet_ioctl);

#ifdef CONFIG_COMPAT
static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
				     struct compat_rtentry __user *ur)
{
	compat_uptr_t rtdev;
	struct rtentry rt;

	if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
			   3 * sizeof(struct sockaddr)) ||
	    get_user(rt.rt_flags, &ur->rt_flags) ||
	    get_user(rt.rt_metric, &ur->rt_metric) ||
	    get_user(rt.rt_mtu, &ur->rt_mtu) ||
	    get_user(rt.rt_window, &ur->rt_window) ||
	    get_user(rt.rt_irtt, &ur->rt_irtt) ||
	    get_user(rtdev, &ur->rt_dev))
		return -EFAULT;

	rt.rt_dev = compat_ptr(rtdev);
	return ip_rt_ioctl(sock_net(sk), cmd, &rt);
}

static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		return inet_compat_routing_ioctl(sk, cmd, argp);
	default:
		if (!sk->sk_prot->compat_ioctl)
			return -ENOIOCTLCMD;
		return sk->sk_prot->compat_ioctl(sk, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */

const struct proto_ops inet_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,
	.poll		   = tcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = inet_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
#ifdef CONFIG_MMU
	.mmap		   = tcp_mmap,
#endif
	.splice_eof	   = inet_splice_eof,
	.splice_read	   = tcp_splice_read,
	.set_peek_off	   = sk_set_peek_off,
	.read_sock	   = tcp_read_sock,
	.read_skb	   = tcp_read_skb,
	.sendmsg_locked    = tcp_sendmsg_locked,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
	.set_rcvlowat	   = tcp_set_rcvlowat,
};
EXPORT_SYMBOL(inet_stream_ops);

const struct proto_ops inet_dgram_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = udp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.read_skb	   = udp_read_skb,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.splice_eof	   = inet_splice_eof,
	.set_peek_off	   = udp_set_peek_off,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);

/*
 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
 * udp_poll
 */
static const struct proto_ops inet_sockraw_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.splice_eof	   = inet_splice_eof,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};

static const struct net_proto_family inet_family_ops = {
	.family = PF_INET,
	.create = inet_create,
	.owner	= THIS_MODULE,
};

/* Upon startup we insert all the elements in inetsw_array[] into
 * the linked list inetsw.
 */
static struct inet_protosw inetsw_array[] =
{
	{
		.type =       SOCK_STREAM,
		.protocol =   IPPROTO_TCP,
		.prot =       &tcp_prot,
		.ops =        &inet_stream_ops,
		.flags =      INET_PROTOSW_PERMANENT |
			      INET_PROTOSW_ICSK,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_UDP,
		.prot =       &udp_prot,
		.ops =        &inet_dgram_ops,
		.flags =      INET_PROTOSW_PERMANENT,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_ICMP,
		.prot =       &ping_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	},

	{
		.type =       SOCK_RAW,
		.protocol =   IPPROTO_IP,	/* wild card */
		.prot =       &raw_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	}
};

#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)

void inet_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	int protocol = p->protocol;
	struct list_head *last_perm;

	spin_lock_bh(&inetsw_lock);

	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	last_perm = &inetsw[p->type];
	list_for_each(lh, &inetsw[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);
		/* Check only the non-wild match. */
		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
			break;
		if (protocol == answer->protocol)
			goto out_permanent;
		last_perm = lh;
	}

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
out:
	spin_unlock_bh(&inetsw_lock);

	return;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}
EXPORT_SYMBOL(inet_register_protosw);
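
/* Illustrative sketch (not part of this file): how a loadable L4 protocol
 * would plug its own entry into the inetsw table at module init time.  The
 * names below are hypothetical; in-tree SCTP does something similar from
 * net/sctp/protocol.c:
 *
 *	static struct inet_protosw foo_stream_protosw = {
 *		.type     = SOCK_STREAM,
 *		.protocol = IPPROTO_FOO,	// hypothetical protocol number
 *		.prot     = &foo_prot,
 *		.ops      = &inet_stream_ops,
 *	};
 *
 *	inet_register_protosw(&foo_stream_protosw);	// on module load
 *	inet_unregister_protosw(&foo_stream_protosw);	// on module unload
 */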

void inet_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw_lock);

		synchronize_net();
	}
}
EXPORT_SYMBOL(inet_unregister_protosw);

static int inet_sk_reselect_saddr(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	__be32 old_saddr = inet->inet_saddr;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;
	__be32 new_saddr;
	struct ip_options_rcu *inet_opt;
	int err;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;

	/* Query new route. */
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, daddr, 0, sk->sk_bound_dev_if,
			      sk->sk_protocol, inet->inet_sport,
			      inet->inet_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	new_saddr = fl4->saddr;

	if (new_saddr == old_saddr) {
		sk_setup_caps(sk, &rt->dst);
		return 0;
	}

	err = inet_bhash2_update_saddr(sk, &new_saddr, AF_INET);
	if (err) {
		ip_rt_put(rt);
		return err;
	}

	sk_setup_caps(sk, &rt->dst);

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
			__func__, &old_saddr, &new_saddr);
	}

	/*
	 * XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	return __sk_prot_rehash(sk);
}

int inet_sk_rebuild_header(struct sock *sk)
{
	struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0));
	struct inet_sock *inet = inet_sk(sk);
	__be32 daddr;
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	daddr = inet->inet_daddr;
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	rcu_read_unlock();
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
				   inet->inet_dport, inet->inet_sport,
				   sk->sk_protocol, ip_sock_rt_tos(sk),
				   sk->sk_bound_dev_if);
	if (!IS_ERR(rt)) {
		err = 0;
		sk_setup_caps(sk, &rt->dst);
	} else {
		err = PTR_ERR(rt);

		/* Routing failed... */
		sk->sk_route_caps = 0;
		/*
		 * Other protocols have to map their equivalent state to
		 * TCP_SYN_SENT. DCCP maps its DCCP_REQUESTING state to
		 * TCP_SYN_SENT. -acme
		 */
		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
		    sk->sk_state != TCP_SYN_SENT ||
		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
		    (err = inet_sk_reselect_saddr(sk)) != 0)
			WRITE_ONCE(sk->sk_err_soft, -err);
	}

	return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);

void inet_sk_set_state(struct sock *sk, int state)
{
	trace_inet_sock_set_state(sk, sk->sk_state, state);
	sk->sk_state = state;
}
EXPORT_SYMBOL(inet_sk_set_state);

void inet_sk_state_store(struct sock *sk, int newstate)
{
	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
	smp_store_release(&sk->sk_state, newstate);
}

struct sk_buff *inet_gso_segment(struct sk_buff *skb,
				 netdev_features_t features)
{
	bool udpfrag = false, fixedid = false, gso_partial, encap;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	unsigned int offset = 0;
	struct iphdr *iph;
	int proto, tot_len;
	int nhoff;
	int ihl;
	int id;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (ihl < sizeof(*iph))
		goto out;

	id = ntohs(iph->id);
	proto = iph->protocol;

	/* Warning: after this point, iph might be no longer valid */
	if (unlikely(!pskb_may_pull(skb, ihl)))
		goto out;
	__skb_pull(skb, ihl);

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += ihl;

	skb_reset_transport_header(skb);

	segs = ERR_PTR(-EPROTONOSUPPORT);

	if (!skb->encapsulation || encap) {
		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);

		/* fixed ID is invalid if DF bit is not set */
		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
			goto out;
	}

	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	skb = segs;
	do {
		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
		if (udpfrag) {
			iph->frag_off = htons(offset >> 3);
			if (skb->next)
				iph->frag_off |= htons(IP_MF);
			offset += skb->len - nhoff - ihl;
			tot_len = skb->len - nhoff;
		} else if (skb_is_gso(skb)) {
			if (!fixedid) {
				iph->id = htons(id);
				id += skb_shinfo(skb)->gso_segs;
			}

			if (gso_partial)
				tot_len = skb_shinfo(skb)->gso_size +
					  SKB_GSO_CB(skb)->data_offset +
					  skb->head - (unsigned char *)iph;
			else
				tot_len = skb->len - nhoff;
		} else {
			if (!fixedid)
				iph->id = htons(id++);
			tot_len = skb->len - nhoff;
		}
		iph->tot_len = htons(tot_len);
		ip_send_check(iph);
		if (encap)
			skb_reset_inner_headers(skb);
		skb->network_header = (u8 *)iph - skb->head;
		skb_reset_mac_len(skb);
	} while ((skb = skb->next));

out:
	return segs;
}

static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	const struct iphdr *iph;
	struct sk_buff *p;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header(skb, hlen, off);
	if (unlikely(!iph))
		goto out;

	proto = iph->protocol;

	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out;

	if (*(u8 *)iph != 0x45)
		goto out;

	if (ip_is_fragment(iph))
		goto out;

	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
		goto out;

	NAPI_GRO_CB(skb)->proto = proto;
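	/* Descriptive note on the flush key below (added for clarity): the
	 * first 32-bit header word is version/IHL (known to be 0x45 here),
	 * TOS and tot_len; after the u16 truncation only tot_len survives,
	 * XORed against skb_gro_len(), so the term is non-zero whenever the
	 * header length field disagrees with the GRO length.  The second
	 * term is the id/frag_off word with DF masked off, so any MF bit or
	 * fragment offset also forces a flush.
	 */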
	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) |
		      (ntohl(*(__be32 *)&iph->id) & ~IP_DF));

	list_for_each_entry(p, head, list) {
		struct iphdr *iph2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct iphdr *)(p->data + off);
		/* The above works because, with the exception of the top
		 * (inner most) layer, we only aggregate pkts with the same
		 * hdr length so all the hdrs we'll need to verify will start
		 * at the same offset.
		 */
		if ((iph->protocol ^ iph2->protocol) |
		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	NAPI_GRO_CB(skb)->flush |= flush;
	NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off;

	/* Note : No need to call skb_gro_postpull_rcsum() here,
	 * as we already checked checksum over ipv4 header was 0
	 */
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
				       ops->callbacks.gro_receive, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static struct sk_buff *ipip_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

#define SECONDS_PER_DAY	86400

/* inet_current_timestamp - Return IP network timestamp
 *
 * Return milliseconds since midnight in network byte order.
 */
__be32 inet_current_timestamp(void)
{
	u32 secs;
	u32 msecs;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	/* Get secs since midnight. */
	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
	/* Convert to msecs. */
	msecs = secs * MSEC_PER_SEC;
	/* Convert nsec to msec. */
	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;

	/* Convert to network byte order. */
	return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);
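
/* Worked example (illustrative only): for a wall-clock time of
 * 01:02:03.456 UTC, tv_sec % 86400 == 3723, so
 *
 *	msecs = 3723 * 1000 + 456 = 3723456
 *
 * and the function returns htonl(3723456), the standard ICMP/IP timestamp
 * format of milliseconds since UTC midnight (RFC 792).
 */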

int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	unsigned int family = READ_ONCE(sk->sk_family);

	if (family == AF_INET)
		return ip_recv_error(sk, msg, len, addr_len);
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
#endif
	return -EINVAL;
}
EXPORT_SYMBOL(inet_recv_error);

int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
	const struct net_offload *ops;
	__be16 totlen = iph->tot_len;
	int proto = iph->protocol;
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph_set_totlen(iph, skb->len - nhoff);
	csum_replace2(&iph->check, totlen, iph->tot_len);

	ops = rcu_dereference(inet_offloads[proto]);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	/* Only need to add sizeof(*iph) to get to the next hdr below
	 * because any hdr with options will have been flushed in
	 * inet_gro_receive().
	 */
	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
			      tcp4_gro_complete, udp4_gro_complete,
			      skb, nhoff + sizeof(*iph));

out:
	return err;
}

static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return inet_gro_complete(skb, nhoff);
}

int inet_ctl_sock_create(struct sock **sk, unsigned short family,
			 unsigned short type, unsigned char protocol,
			 struct net *net)
{
	struct socket *sock;
	int rc = sock_create_kern(net, family, type, protocol, &sock);

	if (rc == 0) {
		*sk = sock->sk;
		(*sk)->sk_allocation = GFP_ATOMIC;
		(*sk)->sk_use_task_frag = false;
		/*
		 * Unhash it so that IP input processing does not even see it,
		 * we do not wish this socket to see incoming packets.
		 */
		(*sk)->sk_prot->unhash(*sk);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);

unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field);

#if BITS_PER_LONG==32

u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
			 size_t syncp_offset)
{
	void *bhptr;
	struct u64_stats_sync *syncp;
	u64 v;
	unsigned int start;

	bhptr = per_cpu_ptr(mib, cpu);
	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
	do {
		start = u64_stats_fetch_begin(syncp);
		v = *(((u64 *)bhptr) + offt);
	} while (u64_stats_fetch_retry(syncp, start));

	return v;
}
EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);

u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
{
	u64 res = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
	}
	return res;
}
EXPORT_SYMBOL_GPL(snmp_fold_field64);
#endif

#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
	.handler =	igmp_rcv,
};
#endif

static const struct net_protocol icmp_protocol = {
	.handler =	icmp_rcv,
	.err_handler =	icmp_err,
	.no_policy =	1,
};

static __net_init int ipv4_mib_init_net(struct net *net)
{
	int i;

	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
	if (!net->mib.tcp_statistics)
		goto err_tcp_mib;
	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
	if (!net->mib.ip_statistics)
		goto err_ip_mib;

	for_each_possible_cpu(i) {
		struct ipstats_mib *af_inet_stats;

		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
		u64_stats_init(&af_inet_stats->syncp);
	}

	net->mib.net_statistics = alloc_percpu(struct linux_mib);
	if (!net->mib.net_statistics)
		goto err_net_mib;
	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udp_statistics)
		goto err_udp_mib;
	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
	if (!net->mib.udplite_statistics)
		goto err_udplite_mib;
	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
	if (!net->mib.icmp_statistics)
		goto err_icmp_mib;
	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
					      GFP_KERNEL);
	if (!net->mib.icmpmsg_statistics)
		goto err_icmpmsg_mib;

	tcp_mib_init(net);
	return 0;

err_icmpmsg_mib:
	free_percpu(net->mib.icmp_statistics);
err_icmp_mib:
	free_percpu(net->mib.udplite_statistics);
err_udplite_mib:
	free_percpu(net->mib.udp_statistics);
err_udp_mib:
	free_percpu(net->mib.net_statistics);
err_net_mib:
	free_percpu(net->mib.ip_statistics);
err_ip_mib:
	free_percpu(net->mib.tcp_statistics);
err_tcp_mib:
	return -ENOMEM;
}

static __net_exit void ipv4_mib_exit_net(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);
	free_percpu(net->mib.icmp_statistics);
	free_percpu(net->mib.udplite_statistics);
	free_percpu(net->mib.udp_statistics);
	free_percpu(net->mib.net_statistics);
	free_percpu(net->mib.ip_statistics);
	free_percpu(net->mib.tcp_statistics);
#ifdef CONFIG_MPTCP
	/* allocated on demand, see mptcp_init_sock() */
	free_percpu(net->mib.mptcp_statistics);
#endif
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
	.init = ipv4_mib_init_net,
	.exit = ipv4_mib_exit_net,
};

static int __init init_ipv4_mibs(void)
{
	return register_pernet_subsys(&ipv4_mib_ops);
}

static __net_init int inet_init_net(struct net *net)
{
	/*
	 * Set defaults for local port range
	 */
	net->ipv4.ip_local_ports.range = 60999u << 16 | 32768u;

	seqlock_init(&net->ipv4.ping_group_range.lock);
	/*
	 * Sane defaults - nobody may create ping sockets.
	 * Boot scripts should set this to a distro-specific group.
	 */
	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);

	/* Default values for sysctl-controlled parameters.
	 * We set them here, in case sysctl is not compiled.
	 */
	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
	net->ipv4.sysctl_ip_fwd_update_priority = 1;
	net->ipv4.sysctl_ip_dynaddr = 0;
	net->ipv4.sysctl_ip_early_demux = 1;
	net->ipv4.sysctl_udp_early_demux = 1;
	net->ipv4.sysctl_tcp_early_demux = 1;
	net->ipv4.sysctl_nexthop_compat_mode = 1;
#ifdef CONFIG_SYSCTL
	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif

	/* Some igmp sysctls, whose values are always used */
	net->ipv4.sysctl_igmp_max_memberships = 20;
	net->ipv4.sysctl_igmp_max_msf = 10;
	/* IGMP reports for link-local multicast groups are enabled by default */
	net->ipv4.sysctl_igmp_llm_reports = 1;
	net->ipv4.sysctl_igmp_qrv = 2;

	net->ipv4.sysctl_fib_notify_on_flag_change = 0;

	return 0;
}

static __net_initdata struct pernet_operations af_inet_ops = {
	.init = inet_init_net,
};

static int __init init_inet_pernet_ops(void)
{
	return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);

/*
 *	IP protocol layer initialiser
 */

static const struct net_offload ipip_offload = {
	.callbacks = {
		.gso_segment	= ipip_gso_segment,
		.gro_receive	= ipip_gro_receive,
		.gro_complete	= ipip_gro_complete,
	},
};

static int __init ipip_offload_init(void)
{
	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
}

static int __init ipv4_offload_init(void)
{
	/*
	 * Add offloads
	 */
	if (udpv4_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (tcpv4_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipip_offload_init() < 0)
		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);

	net_hotdata.ip_packet_offload = (struct packet_offload) {
		.type = cpu_to_be16(ETH_P_IP),
		.callbacks = {
			.gso_segment = inet_gso_segment,
			.gro_receive = inet_gro_receive,
			.gro_complete = inet_gro_complete,
		},
	};
	dev_add_offload(&net_hotdata.ip_packet_offload);
	return 0;
}

fs_initcall(ipv4_offload_init);

static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = ip_rcv,
	.list_func = ip_list_rcv,
};

static int __init inet_init(void)
{
	struct inet_protosw *q;
	struct list_head *r;
	int rc;

	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));

	raw_hashinfo_init(&raw_v4_hashinfo);

	rc = proto_register(&tcp_prot, 1);
	if (rc)
		goto out;

	rc = proto_register(&udp_prot, 1);
	if (rc)
		goto out_unregister_tcp_proto;

	rc = proto_register(&raw_prot, 1);
	if (rc)
		goto out_unregister_udp_proto;

	rc = proto_register(&ping_prot, 1);
	if (rc)
		goto out_unregister_raw_proto;

	/*
	 *	Tell SOCKET that we are alive...
	 */

	(void)sock_register(&inet_family_ops);

#ifdef CONFIG_SYSCTL
	ip_static_sysctl_init();
#endif

	/*
	 *	Add all the base protocols.
	 */

	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
		pr_crit("%s: Cannot add ICMP protocol\n", __func__);

	net_hotdata.udp_protocol = (struct net_protocol) {
		.handler =	udp_rcv,
		.err_handler =	udp_err,
		.no_policy =	1,
	};
	if (inet_add_protocol(&net_hotdata.udp_protocol, IPPROTO_UDP) < 0)
		pr_crit("%s: Cannot add UDP protocol\n", __func__);

	net_hotdata.tcp_protocol = (struct net_protocol) {
		.handler =	tcp_v4_rcv,
		.err_handler =	tcp_v4_err,
		.no_policy =	1,
		.icmp_strict_tag_validation = 1,
	};
	if (inet_add_protocol(&net_hotdata.tcp_protocol, IPPROTO_TCP) < 0)
		pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif

	/* Register the socket-side information for inet_create. */
	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
		INIT_LIST_HEAD(r);

	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
		inet_register_protosw(q);

	/*
	 *	Set the ARP module up
	 */

	arp_init();

	/*
	 *	Set the IP module up
	 */

	ip_init();

	/* Initialise per-cpu ipv4 mibs */
	if (init_ipv4_mibs())
		panic("%s: Cannot init ipv4 mibs\n", __func__);

	/* Setup TCP slab cache for open requests. */
	tcp_init();

	/* Setup UDP memory threshold */
	udp_init();

	/* Add UDP-Lite (RFC 3828) */
	udplite4_register();

	raw_init();

	ping_init();

	/*
	 *	Set the ICMP layer up
	 */

	if (icmp_init() < 0)
		panic("Failed to create the ICMP control socket.\n");

	/*
	 *	Initialise the multicast router
	 */
#if defined(CONFIG_IP_MROUTE)
	if (ip_mr_init())
		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif

	if (init_inet_pernet_ops())
		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);

	ipv4_proc_init();

	ipfrag_init();

	dev_add_pack(&ip_packet_type);

	ip_tunnel_core_init();

	rc = 0;
out:
	return rc;
out_unregister_raw_proto:
	proto_unregister(&raw_prot);
out_unregister_udp_proto:
	proto_unregister(&udp_prot);
out_unregister_tcp_proto:
	proto_unregister(&tcp_prot);
	goto out;
}

fs_initcall(inet_init);

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
	int rc = 0;

	if (raw_proc_init())
		goto out_raw;
	if (tcp4_proc_init())
		goto out_tcp;
	if (udp4_proc_init())
		goto out_udp;
	if (ping_proc_init())
		goto out_ping;
	if (ip_misc_proc_init())
		goto out_misc;
out:
	return rc;
out_misc:
	ping_proc_exit();
out_ping:
	udp4_proc_exit();
out_udp:
	tcp4_proc_exit();
out_tcp:
	raw_proc_exit();
out_raw:
	rc = -ENOMEM;
	goto out;
}

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */