// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PF_INET protocol family socket handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Changes (see also sock.c)
 *
 *		piggy,
 *		Karl Knutson	:	Socket protocol table
 *		A.N.Kuznetsov	:	Socket death error in accept().
 *		John Richardson :	Fix non blocking error in connect()
 *					so sockets that fail to connect
 *					don't return -EINPROGRESS.
 *		Alan Cox	:	Asynchronous I/O support
 *		Alan Cox	:	Keep correct socket pointer on sock
 *					structures
 *					when accept() ed
 *		Alan Cox	:	Semantics of SO_LINGER aren't state
 *					moved to close when you look carefully.
 *					With this fixed and the accept bug fixed
 *					some RPC stuff seems happier.
 *		Niibe Yutaka	:	4.4BSD style write async I/O
 *		Alan Cox,
 *		Tony Gale 	:	Fixed reuse semantics.
 *		Alan Cox	:	bind() shouldn't abort existing but dead
 *					sockets. Stops FTP netin:.. I hope.
 *		Alan Cox	:	bind() works correctly for RAW sockets.
 *					Note that FreeBSD at least was broken
 *					in this respect so be careful with
 *					compatibility tests...
 *		Alan Cox	:	routing cache support
 *		Alan Cox	:	memzero the socket structure for
 *					compactness.
 *		Matt Day	:	nonblock connect error handler
 *		Alan Cox	:	Allow large numbers of pending sockets
 *					(eg for big web sites), but only if
 *					the application specifically requested it.
 *		Alan Cox	:	New buffering throughout IP. Used
 *					dumbly.
 *		Alan Cox	:	New buffering now used smartly.
 *		Alan Cox	:	BSD rather than common sense
 *					interpretation of listen.
 *		Germano Caronni	:	Assorted small races.
 *		Alan Cox	:	sendmsg/recvmsg basic support.
 *		Alan Cox	:	Only sendmsg/recvmsg now supported.
 *		Alan Cox	:	Locked down bind (see security list).
 *		Alan Cox	:	Loosened bind a little.
 *		Mike McLagan	:	ADD/DEL DLCI Ioctls
 *	Willy Konynenberg	:	Transparent proxying support.
 *		David S. Miller	:	New socket lookup architecture.
 *					Some other random speedups.
 *		Cyrus Durgin	:	Cleaned up file for kmod hacks.
 *		Andi Kleen	:	Fix inet_stream_connect TCP race.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/slab.h>

#include <linux/uaccess.h>

#include <linux/inet.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/ip_tunnels.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/secure_seq.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#include <net/l3mdev.h>
#include <net/compat.h>

#include <trace/events/sock.h>

/* The inetsw table contains everything that inet_create needs to
 * build a new socket.
 */
static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);

/* New destruction routine */

void inet_sock_destruct(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_rx_skb_cache) {
		__kfree_skb(sk->sk_rx_skb_cache);
		sk->sk_rx_skb_cache = NULL;
	}
	__skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
		pr_err("Attempt to release TCP socket in state %d %p\n",
		       sk->sk_state, sk);
		return;
	}
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive inet socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);

	kfree(rcu_dereference_protected(inet->inet_opt, 1));
	dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
	dst_release(rcu_dereference_protected(sk->sk_rx_dst, 1));
	sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);

/*
 *	The routines beyond this point handle the behaviour of an AF_INET
 *	socket object. Mostly it punts to the subprotocols of IP to do
 *	the work.
 */

/*
 *	Automatically bind an unbound socket.
 */

static int inet_autobind(struct sock *sk)
{
	struct inet_sock *inet;
	/* We may need to bind the socket. */
	lock_sock(sk);
	inet = inet_sk(sk);
	if (!inet->inet_num) {
		if (sk->sk_prot->get_port(sk, 0)) {
			release_sock(sk);
			return -EAGAIN;
		}
		inet->inet_sport = htons(inet->inet_num);
	}
	release_sock(sk);
	return 0;
}

/*
 *	Move a socket into listening state.
 */
int inet_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	unsigned char old_state;
	int err, tcp_fastopen;

	lock_sock(sk);

	err = -EINVAL;
	if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM)
		goto out;

	old_state = sk->sk_state;
	if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
		goto out;

	WRITE_ONCE(sk->sk_max_ack_backlog, backlog);
	/* Really, if the socket is already in listen state
	 * we can only allow the backlog to be adjusted.
	 */
	if (old_state != TCP_LISTEN) {
		/* Enable TFO w/o requiring TCP_FASTOPEN socket option.
		 * Note that only TCP sockets (SOCK_STREAM) will reach here.
		 * Also the fastopen backlog may already have been set via
		 * the option because the socket was in TCP_LISTEN state
		 * previously but was shutdown() rather than close().
		 */
		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
			fastopen_queue_tune(sk, backlog);
			tcp_fastopen_init_key_once(sock_net(sk));
		}

		err = inet_csk_listen_start(sk, backlog);
		if (err)
			goto out;
		tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
	}
	err = 0;

out:
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_listen);
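
/* Example (userspace sketch, assuming the usual socket(2)/listen(2) API):
 * per the old_state check above, a second listen() on a socket that is
 * already in TCP_LISTEN only updates sk_max_ack_backlog.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	// ... bind(fd, ...) ...
 *	listen(fd, 16);		// enters TCP_LISTEN via inet_csk_listen_start()
 *	listen(fd, 128);	// already listening: only the backlog changes
 */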

/*
 *	Create an inet socket.
 */

static int inet_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	struct inet_protosw *answer;
	struct inet_sock *inet;
	struct proto *answer_prot;
	unsigned char answer_flags;
	int try_loading_module = 0;
	int err;

	if (protocol < 0 || protocol >= IPPROTO_MAX)
		return -EINVAL;

	sock->state = SS_UNCONNECTED;

	/* Look for the requested type/protocol pair. */
lookup_protocol:
	err = -ESOCKTNOSUPPORT;
	rcu_read_lock();
	list_for_each_entry_rcu(answer, &inetsw[sock->type], list) {

		err = 0;
		/* Check the non-wild match. */
		if (protocol == answer->protocol) {
			if (protocol != IPPROTO_IP)
				break;
		} else {
			/* Check for the two wild cases. */
			if (IPPROTO_IP == protocol) {
				protocol = answer->protocol;
				break;
			}
			if (IPPROTO_IP == answer->protocol)
				break;
		}
		err = -EPROTONOSUPPORT;
	}

	if (unlikely(err)) {
		if (try_loading_module < 2) {
			rcu_read_unlock();
			/*
			 * Be more specific, e.g. net-pf-2-proto-132-type-1
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM)
			 */
			if (++try_loading_module == 1)
				request_module("net-pf-%d-proto-%d-type-%d",
					       PF_INET, protocol, sock->type);
			/*
			 * Fall back to generic, e.g. net-pf-2-proto-132
			 * (net-pf-PF_INET-proto-IPPROTO_SCTP)
			 */
			else
				request_module("net-pf-%d-proto-%d",
					       PF_INET, protocol);
			goto lookup_protocol;
		} else
			goto out_rcu_unlock;
	}

	err = -EPERM;
	if (sock->type == SOCK_RAW && !kern &&
	    !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out_rcu_unlock;

	sock->ops = answer->ops;
	answer_prot = answer->prot;
	answer_flags = answer->flags;
	rcu_read_unlock();

	WARN_ON(!answer_prot->slab);

	err = -ENOMEM;
	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
	if (!sk)
		goto out;

	err = 0;
	if (INET_PROTOSW_REUSE & answer_flags)
		sk->sk_reuse = SK_CAN_REUSE;

	if (INET_PROTOSW_ICSK & answer_flags)
		inet_init_csk_locks(sk);

	inet = inet_sk(sk);
	inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;

	inet->nodefrag = 0;

	if (SOCK_RAW == sock->type) {
		inet->inet_num = protocol;
		if (IPPROTO_RAW == protocol)
			inet->hdrincl = 1;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
		inet->pmtudisc = IP_PMTUDISC_DONT;
	else
		inet->pmtudisc = IP_PMTUDISC_WANT;

	inet->inet_id = 0;

	sock_init_data(sock, sk);

	sk->sk_destruct	   = inet_sock_destruct;
	sk->sk_protocol	   = protocol;
	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;

	inet->uc_ttl	= -1;
	inet->mc_loop	= 1;
	inet->mc_ttl	= 1;
	inet->mc_all	= 1;
	inet->mc_index	= 0;
	inet->mc_list	= NULL;
	inet->rcv_tos	= 0;

	sk_refcnt_debug_inc(sk);

	if (inet->inet_num) {
		/* It assumes that any protocol which allows
		 * the user to assign a number at socket
		 * creation time automatically
		 * shares.
		 */
		inet->inet_sport = htons(inet->inet_num);
		/* Add to protocol hash chains. */
		err = sk->sk_prot->hash(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}

	if (sk->sk_prot->init) {
		err = sk->sk_prot->init(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}

	if (!kern) {
		err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
		if (err) {
			sk_common_release(sk);
			goto out;
		}
	}
out:
	return err;
out_rcu_unlock:
	rcu_read_unlock();
	goto out;
}
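
/* Example (userspace sketch): how the wildcard matching above resolves.
 * Protocol 0 (IPPROTO_IP) takes the first entry registered for the type,
 * and an unknown pair triggers the two request_module() attempts before
 * -EPROTONOSUPPORT is returned.
 *
 *	socket(AF_INET, SOCK_STREAM, 0);	// wildcard -> IPPROTO_TCP
 *	socket(AF_INET, SOCK_DGRAM, 0);		// wildcard -> IPPROTO_UDP
 *	socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *		// may load "net-pf-2-proto-132-type-1", then "net-pf-2-proto-132"
 */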

/*
 *	The peer socket should always be NULL (or else). When we call this
 *	function we are destroying the object and from then on nobody
 *	should refer to it.
 */
int inet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		long timeout;

		if (!sk->sk_kern_sock)
			BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);

		/* Applications forget to leave groups before exiting */
		ip_mc_drop_socket(sk);

		/* If linger is set, we don't return until the close
		 * is complete.  Otherwise we return immediately. The
		 * actual closing is done the same either way.
		 *
		 * If the close is due to the process exiting, we never
		 * linger..
		 */
		timeout = 0;
		if (sock_flag(sk, SOCK_LINGER) &&
		    !(current->flags & PF_EXITING))
			timeout = sk->sk_lingertime;
		sk->sk_prot->close(sk, timeout);
		sock->sk = NULL;
	}
	return 0;
}
EXPORT_SYMBOL(inet_release);
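
/* Example (userspace sketch): with SO_LINGER set and the process not
 * exiting, the sk_prot->close() above is given a non-zero timeout.
 *
 *	struct linger lin = { .l_onoff = 1, .l_linger = 5 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
 *	close(fd);	// may block up to 5 seconds while the close completes
 */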

int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	u32 flags = BIND_WITH_LOCK;
	int err;

	/* If the socket has its own bind function then use it. (RAW) */
	if (sk->sk_prot->bind) {
		return sk->sk_prot->bind(sk, uaddr, addr_len);
	}
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	/* BPF prog is run before any checks are done so that if the prog
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
						 CGROUP_INET4_BIND, &flags);
	if (err)
		return err;

	return __inet_bind(sk, uaddr, addr_len, flags);
}
EXPORT_SYMBOL(inet_bind);

int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
		u32 flags)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	unsigned short snum;
	int chk_addr_ret;
	u32 tb_id = RT_TABLE_LOCAL;
	int err;

	if (addr->sin_family != AF_INET) {
		/* Compatibility games : accept AF_UNSPEC (mapped to AF_INET)
		 * only if s_addr is INADDR_ANY.
		 */
		err = -EAFNOSUPPORT;
		if (addr->sin_family != AF_UNSPEC ||
		    addr->sin_addr.s_addr != htonl(INADDR_ANY))
			goto out;
	}

	tb_id = l3mdev_fib_table_by_index(net, sk->sk_bound_dev_if) ? : tb_id;
	chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);

	/* Not specified by any standard per se, however it breaks too
	 * many applications when removed.  It is unfortunate since
	 * allowing applications to make a non-local bind solves
	 * several problems with systems using dynamic addressing.
	 * (ie. your servers still start up even if your ISDN link
	 *  is temporarily down)
	 */
	err = -EADDRNOTAVAIL;
	if (!inet_can_nonlocal_bind(net, inet) &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
	    chk_addr_ret != RTN_LOCAL &&
	    chk_addr_ret != RTN_MULTICAST &&
	    chk_addr_ret != RTN_BROADCAST)
		goto out;

	snum = ntohs(addr->sin_port);
	err = -EPERM;
	if (snum && inet_is_local_unbindable_port(net, snum))
		goto out;

	err = -EACCES;
	if (!(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
	    snum && inet_port_requires_bind_service(net, snum) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		goto out;

	/*      We keep a pair of addresses. rcv_saddr is the one
	 *      used by hash lookups, and saddr is used for transmit.
	 *
	 *      In the BSD API these are the same except where it
	 *      would be illegal to use them (multicast/broadcast) in
	 *      which case the sending device address is used.
	 */
	if (flags & BIND_WITH_LOCK)
		lock_sock(sk);

	/* Check these errors (active socket, double bind). */
	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE || inet->inet_num)
		goto out_release_sock;

	inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
		inet->inet_saddr = 0;  /* Use device */

	/* Make sure we are allowed to bind here. */
	if (snum || !(inet->bind_address_no_port ||
		      (flags & BIND_FORCE_ADDRESS_NO_PORT))) {
		if (sk->sk_prot->get_port(sk, snum)) {
			inet->inet_saddr = inet->inet_rcv_saddr = 0;
			err = -EADDRINUSE;
			goto out_release_sock;
		}
		if (!(flags & BIND_FROM_BPF)) {
			err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
			if (err) {
				inet->inet_saddr = inet->inet_rcv_saddr = 0;
				goto out_release_sock;
			}
		}
	}

	if (inet->inet_rcv_saddr)
		sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
	if (snum)
		sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
	inet->inet_sport = htons(inet->inet_num);
	inet->inet_daddr = 0;
	inet->inet_dport = 0;
	sk_dst_reset(sk);
	err = 0;
out_release_sock:
	if (flags & BIND_WITH_LOCK)
		release_sock(sk);
out:
	return err;
}
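
/* Example (userspace sketch): the port checks above in action. Binding a
 * port that requires the bind service capability fails with -EACCES unless
 * the caller has CAP_NET_BIND_SERVICE in the socket's user namespace;
 * sin_port == 0 lets get_port() pick a free port.
 *
 *	struct sockaddr_in a = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *		.sin_port = htons(80),	// -EACCES without CAP_NET_BIND_SERVICE
 *	};
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 */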

int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
		       int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	int err;

	if (addr_len < sizeof(uaddr->sa_family))
		return -EINVAL;
	if (uaddr->sa_family == AF_UNSPEC)
		return sk->sk_prot->disconnect(sk, flags);

	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
		if (err)
			return err;
	}

	if (data_race(!inet_sk(sk)->inet_num) && inet_autobind(sk))
		return -EAGAIN;
	return sk->sk_prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
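
/* Example (userspace sketch): per the AF_UNSPEC branch above, connecting a
 * datagram socket to an AF_UNSPEC address takes the disconnect path and
 * dissolves the association.
 *
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));	// set peer
 *	struct sockaddr unspec = { .sa_family = AF_UNSPEC };
 *	connect(fd, &unspec, sizeof(unspec));			// disconnect
 */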

static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change state of the socket from TCP_SYN_*.
	 * Connect() does not allow getting error notifications
	 * without closing the socket.
	 */
	while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		release_sock(sk);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(sk);
		if (signal_pending(current) || !timeo)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	return timeo;
}

/*
 *	Connect to a remote host. There is regrettably still a little
 *	TCP 'magic' in here.
 */
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			  int addr_len, int flags, int is_sendmsg)
{
	struct sock *sk = sock->sk;
	int err;
	long timeo;

	/*
	 * uaddr can be NULL and addr_len can be 0 if:
	 * sk is a TCP fastopen active socket and
	 * TCP_FASTOPEN_CONNECT sockopt is set and
	 * we already have a valid cookie for this socket.
	 * In this case, user can call write() after connect().
	 * write() will invoke tcp_sendmsg_fastopen() which calls
	 * __inet_stream_connect().
	 */
	if (uaddr) {
		if (addr_len < sizeof(uaddr->sa_family))
			return -EINVAL;

		if (uaddr->sa_family == AF_UNSPEC) {
			err = sk->sk_prot->disconnect(sk, flags);
			sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
			goto out;
		}
	}

	switch (sock->state) {
	default:
		err = -EINVAL;
		goto out;
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_CONNECTING:
		if (inet_sk(sk)->defer_connect)
			err = is_sendmsg ? -EINPROGRESS : -EISCONN;
		else
			err = -EALREADY;
		/* Fall out of switch with err, set for this state */
		break;
	case SS_UNCONNECTED:
		err = -EISCONN;
		if (sk->sk_state != TCP_CLOSE)
			goto out;

		if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
			err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
			if (err)
				goto out;
		}

		err = sk->sk_prot->connect(sk, uaddr, addr_len);
		if (err < 0)
			goto out;

		sock->state = SS_CONNECTING;

		if (!err && inet_sk(sk)->defer_connect)
			goto out;

		/* Just entered SS_CONNECTING state; the only
		 * difference is that return value in non-blocking
		 * case is EINPROGRESS, rather than EALREADY.
		 */
		err = -EINPROGRESS;
		break;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
				tcp_sk(sk)->fastopen_req &&
				tcp_sk(sk)->fastopen_req->data ? 1 : 0;

		/* Error code is set above */
		if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
			goto out;

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
	}

	/* Connection was closed by RST, timeout, ICMP error
	 * or another process disconnected us.
	 */
	if (sk->sk_state == TCP_CLOSE)
		goto sock_error;

	/* sk->sk_err may be not zero now, if RECVERR was ordered by user
	 * and error was received after socket entered established state.
	 * Hence, it is handled normally after connect() returns successfully.
	 */

	sock->state = SS_CONNECTED;
	err = 0;
out:
	return err;

sock_error:
	err = sock_error(sk) ? : -ECONNABORTED;
	sock->state = SS_UNCONNECTED;
	if (sk->sk_prot->disconnect(sk, flags))
		sock->state = SS_DISCONNECTING;
	goto out;
}
EXPORT_SYMBOL(__inet_stream_connect);
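
/* Example (userspace sketch): the -EINPROGRESS result above is what a
 * non-blocking connect() observes; completion is then detected by polling
 * for writability and reading the final status back with SO_ERROR.
 *
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0 &&
 *	    errno == EINPROGRESS) {
 *		struct pollfd p = { .fd = fd, .events = POLLOUT };
 *		poll(&p, 1, -1);
 *		int e;
 *		socklen_t l = sizeof(e);
 *		getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &l);	// 0 on success
 *	}
 */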

int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			int addr_len, int flags)
{
	int err;

	lock_sock(sock->sk);
	err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
	release_sock(sock->sk);
	return err;
}
EXPORT_SYMBOL(inet_stream_connect);

/*
 *	Accept a pending connection. The TCP layer now gives BSD semantics.
 */

int inet_accept(struct socket *sock, struct socket *newsock, int flags,
		bool kern)
{
	struct sock *sk1 = sock->sk;
	int err = -EINVAL;
	struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err, kern);

	if (!sk2)
		goto do_err;

	lock_sock(sk2);

	sock_rps_record_flow(sk2);
	WARN_ON(!((1 << sk2->sk_state) &
		  (TCPF_ESTABLISHED | TCPF_SYN_RECV |
		  TCPF_CLOSE_WAIT | TCPF_CLOSE)));

	sock_graft(sk2, newsock);

	newsock->state = SS_CONNECTED;
	err = 0;
	release_sock(sk2);
do_err:
	return err;
}
EXPORT_SYMBOL(inet_accept);

/*
 *	This does both peername and sockname.
 */
int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		 int peer)
{
	struct sock *sk		= sock->sk;
	struct inet_sock *inet	= inet_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr);

	sin->sin_family = AF_INET;
	lock_sock(sk);
	if (peer) {
		if (!inet->inet_dport ||
		    (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) &&
		     peer == 1)) {
			release_sock(sk);
			return -ENOTCONN;
		}
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
		BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
				       CGROUP_INET4_GETPEERNAME);
	} else {
		__be32 addr = inet->inet_rcv_saddr;
		if (!addr)
			addr = inet->inet_saddr;
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
		BPF_CGROUP_RUN_SA_PROG(sk, (struct sockaddr *)sin,
				       CGROUP_INET4_GETSOCKNAME);
	}
	release_sock(sk);
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
	return sizeof(*sin);
}
EXPORT_SYMBOL(inet_getname);
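
/* Example (userspace sketch): both getsockname() and getpeername() land
 * here; per the dport/state check above, getpeername() on an unconnected
 * socket returns -ENOTCONN.
 *
 *	struct sockaddr_in sin;
 *	socklen_t len = sizeof(sin);
 *	getsockname(fd, (struct sockaddr *)&sin, &len);	// local addr/port
 *	getpeername(fd, (struct sockaddr *)&sin, &len);	// -ENOTCONN if unconnected
 */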

int inet_send_prepare(struct sock *sk)
{
	sock_rps_record_flow(sk);

	/* We may need to bind the socket. */
	if (data_race(!inet_sk(sk)->inet_num) && !sk->sk_prot->no_autobind &&
	    inet_autobind(sk))
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL_GPL(inet_send_prepare);

int inet_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return -EAGAIN;

	return INDIRECT_CALL_2(sk->sk_prot->sendmsg, tcp_sendmsg, udp_sendmsg,
			       sk, msg, size);
}
EXPORT_SYMBOL(inet_sendmsg);

ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
		      size_t size, int flags)
{
	struct sock *sk = sock->sk;

	if (unlikely(inet_send_prepare(sk)))
		return -EAGAIN;

	if (sk->sk_prot->sendpage)
		return sk->sk_prot->sendpage(sk, page, offset, size, flags);
	return sock_no_sendpage(sock, page, offset, size, flags);
}
EXPORT_SYMBOL(inet_sendpage);

INDIRECT_CALLABLE_DECLARE(int udp_recvmsg(struct sock *, struct msghdr *,
					  size_t, int, int, int *));
int inet_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		 int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	if (likely(!(flags & MSG_ERRQUEUE)))
		sock_rps_record_flow(sk);

	err = INDIRECT_CALL_2(sk->sk_prot->recvmsg, tcp_recvmsg, udp_recvmsg,
			      sk, msg, size, flags & MSG_DONTWAIT,
			      flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(inet_recvmsg);
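
/* Example (userspace sketch): MSG_ERRQUEUE, which skips the RPS flow
 * recording above, drains queued errors, e.g. ICMP errors queued on
 * sk_error_queue once IP_RECVERR is enabled.
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
 *	// ... later, with a struct msghdr prepared with a control buffer:
 *	recvmsg(fd, &msg, MSG_ERRQUEUE);	// reads a queued error
 */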

int inet_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	/* This should really check to make sure
	 * the socket is a TCP socket. (WHY AC...)
	 */
	how++; /* maps SHUT_RD (0) -> 1 (bit 1, rcvs), SHUT_WR (1) -> 2
		  (bit 2, snds), SHUT_RDWR (2) -> 3 (both bits) */
	if ((how & ~SHUTDOWN_MASK) || !how)	/* MAXINT->0 */
		return -EINVAL;

	lock_sock(sk);
	if (sock->state == SS_CONNECTING) {
		if ((1 << sk->sk_state) &
		    (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE))
			sock->state = SS_DISCONNECTING;
		else
			sock->state = SS_CONNECTED;
	}

	switch (sk->sk_state) {
	case TCP_CLOSE:
		err = -ENOTCONN;
		/* Hack to wake up other listeners, who can poll for
		   EPOLLHUP, even on eg. unconnected UDP sockets -- RR */
		fallthrough;
	default:
		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | how);
		if (sk->sk_prot->shutdown)
			sk->sk_prot->shutdown(sk, how);
		break;

	/* Remaining two branches are temporary solution for missing
	 * close() in multithreaded environment. It is _not_ a good idea,
	 * but we have no choice until close() is repaired at VFS level.
	 */
	case TCP_LISTEN:
		if (!(how & RCV_SHUTDOWN))
			break;
		fallthrough;
	case TCP_SYN_SENT:
		err = sk->sk_prot->disconnect(sk, O_NONBLOCK);
		sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
		break;
	}

	/* Wake up anyone sleeping in poll. */
	sk->sk_state_change(sk);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(inet_shutdown);
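
/* Example (userspace sketch) of the how++ mapping above:
 *
 *	shutdown(fd, SHUT_RD);		// 0 -> 1 == RCV_SHUTDOWN
 *	shutdown(fd, SHUT_WR);		// 1 -> 2 == SEND_SHUTDOWN
 *	shutdown(fd, SHUT_RDWR);	// 2 -> 3 == both bits
 */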

/*
 *	ioctl() calls you can issue on an INET socket. Most of these are
 *	device configuration and stuff and very rarely used. Some ioctls
 *	pass on to the socket itself.
 *
 *	NOTE: I like the idea of a module for the config stuff. ie ifconfig
 *	loads the devconfigure module does its configuring and unloads it.
 *	There's a good 20K of config code hanging around the kernel.
 */

int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	int err = 0;
	struct net *net = sock_net(sk);
	void __user *p = (void __user *)arg;
	struct ifreq ifr;
	struct rtentry rt;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		if (copy_from_user(&rt, p, sizeof(struct rtentry)))
			return -EFAULT;
		err = ip_rt_ioctl(net, cmd, &rt);
		break;
	case SIOCRTMSG:
		err = -EINVAL;
		break;
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
		err = arp_ioctl(net, cmd, (void __user *)arg);
		break;
	case SIOCGIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCGIFPFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		if (!err && put_user_ifreq(&ifr, p))
			err = -EFAULT;
		break;

	case SIOCSIFADDR:
	case SIOCSIFBRDADDR:
	case SIOCSIFNETMASK:
	case SIOCSIFDSTADDR:
	case SIOCSIFPFLAGS:
	case SIOCSIFFLAGS:
		if (get_user_ifreq(&ifr, NULL, p))
			return -EFAULT;
		err = devinet_ioctl(net, cmd, &ifr);
		break;
	default:
		if (sk->sk_prot->ioctl)
			err = sk->sk_prot->ioctl(sk, cmd, arg);
		else
			err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
EXPORT_SYMBOL(inet_ioctl);

#ifdef CONFIG_COMPAT
static int inet_compat_routing_ioctl(struct sock *sk, unsigned int cmd,
		struct compat_rtentry __user *ur)
{
	compat_uptr_t rtdev;
	struct rtentry rt;

	if (copy_from_user(&rt.rt_dst, &ur->rt_dst,
			3 * sizeof(struct sockaddr)) ||
	    get_user(rt.rt_flags, &ur->rt_flags) ||
	    get_user(rt.rt_metric, &ur->rt_metric) ||
	    get_user(rt.rt_mtu, &ur->rt_mtu) ||
	    get_user(rt.rt_window, &ur->rt_window) ||
	    get_user(rt.rt_irtt, &ur->rt_irtt) ||
	    get_user(rtdev, &ur->rt_dev))
		return -EFAULT;

	rt.rt_dev = compat_ptr(rtdev);
	return ip_rt_ioctl(sock_net(sk), cmd, &rt);
}

static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = compat_ptr(arg);
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCADDRT:
	case SIOCDELRT:
		return inet_compat_routing_ioctl(sk, cmd, argp);
	default:
		if (!sk->sk_prot->compat_ioctl)
			return -ENOIOCTLCMD;
		return sk->sk_prot->compat_ioctl(sk, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */

const struct proto_ops inet_stream_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet_getname,
	.poll		   = tcp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = inet_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
#ifdef CONFIG_MMU
	.mmap		   = tcp_mmap,
#endif
	.sendpage	   = inet_sendpage,
	.splice_read	   = tcp_splice_read,
	.read_sock	   = tcp_read_sock,
	.sendmsg_locked    = tcp_sendmsg_locked,
	.sendpage_locked   = tcp_sendpage_locked,
	.peek_len	   = tcp_peek_len,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
	.set_rcvlowat	   = tcp_set_rcvlowat,
};
EXPORT_SYMBOL(inet_stream_ops);

const struct proto_ops inet_dgram_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = udp_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.read_sock	   = udp_read_sock,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
	.set_peek_off	   = sk_set_peek_off,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};
EXPORT_SYMBOL(inet_dgram_ops);

/*
 * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
 * udp_poll
 */
static const struct proto_ops inet_sockraw_ops = {
	.family		   = PF_INET,
	.owner		   = THIS_MODULE,
	.release	   = inet_release,
	.bind		   = inet_bind,
	.connect	   = inet_dgram_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = sock_no_accept,
	.getname	   = inet_getname,
	.poll		   = datagram_poll,
	.ioctl		   = inet_ioctl,
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = inet_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = inet_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet_compat_ioctl,
#endif
};

static const struct net_proto_family inet_family_ops = {
	.family = PF_INET,
	.create = inet_create,
	.owner	= THIS_MODULE,
};

/* Upon startup we insert all the elements in inetsw_array[] into
 * the linked list inetsw.
 */
static struct inet_protosw inetsw_array[] =
{
	{
		.type =       SOCK_STREAM,
		.protocol =   IPPROTO_TCP,
		.prot =       &tcp_prot,
		.ops =        &inet_stream_ops,
		.flags =      INET_PROTOSW_PERMANENT |
			      INET_PROTOSW_ICSK,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_UDP,
		.prot =       &udp_prot,
		.ops =        &inet_dgram_ops,
		.flags =      INET_PROTOSW_PERMANENT,
	},

	{
		.type =       SOCK_DGRAM,
		.protocol =   IPPROTO_ICMP,
		.prot =       &ping_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	},

	{
		.type =       SOCK_RAW,
		.protocol =   IPPROTO_IP,	/* wild card */
		.prot =       &raw_prot,
		.ops =        &inet_sockraw_ops,
		.flags =      INET_PROTOSW_REUSE,
	}
};

#define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array)

void inet_register_protosw(struct inet_protosw *p)
{
	struct list_head *lh;
	struct inet_protosw *answer;
	int protocol = p->protocol;
	struct list_head *last_perm;

	spin_lock_bh(&inetsw_lock);

	if (p->type >= SOCK_MAX)
		goto out_illegal;

	/* If we are trying to override a permanent protocol, bail. */
	last_perm = &inetsw[p->type];
	list_for_each(lh, &inetsw[p->type]) {
		answer = list_entry(lh, struct inet_protosw, list);
		/* Check only the non-wild match. */
		if ((INET_PROTOSW_PERMANENT & answer->flags) == 0)
			break;
		if (protocol == answer->protocol)
			goto out_permanent;
		last_perm = lh;
	}

	/* Add the new entry after the last permanent entry if any, so that
	 * the new entry does not override a permanent entry when matched with
	 * a wild-card protocol. But it is allowed to override any existing
	 * non-permanent entry.  This means that when we remove this entry, the
	 * system automatically returns to the old behavior.
	 */
	list_add_rcu(&p->list, last_perm);
out:
	spin_unlock_bh(&inetsw_lock);

	return;

out_permanent:
	pr_err("Attempt to override permanent protocol %d\n", protocol);
	goto out;

out_illegal:
	pr_err("Ignoring attempt to register invalid socket type %d\n",
	       p->type);
	goto out;
}
EXPORT_SYMBOL(inet_register_protosw);
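
/* Sketch of a caller (hypothetical protocol, not from this file): a module
 * registers its type/protocol pair much like the inetsw_array entries
 * above, and the entry is slotted in after the last permanent entry.
 *
 *	static struct inet_protosw my_protosw = {
 *		.type     = SOCK_DGRAM,
 *		.protocol = 253,		// experimental protocol number
 *		.prot     = &my_proto,		// hypothetical struct proto
 *		.ops      = &inet_dgram_ops,
 *	};
 *	inet_register_protosw(&my_protosw);
 */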

void inet_unregister_protosw(struct inet_protosw *p)
{
	if (INET_PROTOSW_PERMANENT & p->flags) {
		pr_err("Attempt to unregister permanent protocol %d\n",
		       p->protocol);
	} else {
		spin_lock_bh(&inetsw_lock);
		list_del_rcu(&p->list);
		spin_unlock_bh(&inetsw_lock);

		synchronize_net();
	}
}
EXPORT_SYMBOL(inet_unregister_protosw);

static int inet_sk_reselect_saddr(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	__be32 old_saddr = inet->inet_saddr;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;
	__be32 new_saddr;
	struct ip_options_rcu *inet_opt;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;

	/* Query new route. */
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
			      sk->sk_bound_dev_if, sk->sk_protocol,
			      inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	sk_setup_caps(sk, &rt->dst);

	new_saddr = fl4->saddr;

	if (new_saddr == old_saddr)
		return 0;

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {
		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",
			__func__, &old_saddr, &new_saddr);
	}

	inet->inet_saddr = inet->inet_rcv_saddr = new_saddr;

	/*
	 * XXX The only one ugly spot where we need to
	 * XXX really change the sockets identity after
	 * XXX it has entered the hashes. -DaveM
	 *
	 * Besides that, it does not check for connection
	 * uniqueness. Wait for troubles.
	 */
	return __sk_prot_rehash(sk);
}

int inet_sk_rebuild_header(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
	__be32 daddr;
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	int err;

	/* Route is OK, nothing to do. */
	if (rt)
		return 0;

	/* Reroute. */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	daddr = inet->inet_daddr;
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	rcu_read_unlock();
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
				   inet->inet_dport, inet->inet_sport,
				   sk->sk_protocol, RT_CONN_FLAGS(sk),
				   sk->sk_bound_dev_if);
	if (!IS_ERR(rt)) {
		err = 0;
		sk_setup_caps(sk, &rt->dst);
	} else {
		err = PTR_ERR(rt);

		/* Routing failed... */
		sk->sk_route_caps = 0;
		/*
		 * Other protocols have to map their equivalent state to TCP_SYN_SENT.
		 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
		 */
		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||
		    sk->sk_state != TCP_SYN_SENT ||
		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
		    (err = inet_sk_reselect_saddr(sk)) != 0)
			sk->sk_err_soft = -err;
	}

	return err;
}
EXPORT_SYMBOL(inet_sk_rebuild_header);

void inet_sk_set_state(struct sock *sk, int state)
{
	trace_inet_sock_set_state(sk, sk->sk_state, state);
	sk->sk_state = state;
}
EXPORT_SYMBOL(inet_sk_set_state);

void inet_sk_state_store(struct sock *sk, int newstate)
{
	trace_inet_sock_set_state(sk, sk->sk_state, newstate);
	smp_store_release(&sk->sk_state, newstate);
}

struct sk_buff *inet_gso_segment(struct sk_buff *skb,
				 netdev_features_t features)
{
	bool udpfrag = false, fixedid = false, gso_partial, encap;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	unsigned int offset = 0;
	struct iphdr *iph;
	int proto, tot_len;
	int nhoff;
	int ihl;
	int id;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;
	if (ihl < sizeof(*iph))
		goto out;

	id = ntohs(iph->id);
	proto = iph->protocol;

	/* Warning: after this point, iph might be no longer valid */
	if (unlikely(!pskb_may_pull(skb, ihl)))
		goto out;
	__skb_pull(skb, ihl);

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += ihl;

	skb_reset_transport_header(skb);

	segs = ERR_PTR(-EPROTONOSUPPORT);

	if (!skb->encapsulation || encap) {
		udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
		fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);

		/* fixed ID is invalid if DF bit is not set */
		if (fixedid && !(ip_hdr(skb)->frag_off & htons(IP_DF)))
			goto out;
	}

	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	skb = segs;
	do {
		iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
		if (udpfrag) {
			iph->frag_off = htons(offset >> 3);
			if (skb->next)
				iph->frag_off |= htons(IP_MF);
			offset += skb->len - nhoff - ihl;
			tot_len = skb->len - nhoff;
		} else if (skb_is_gso(skb)) {
			if (!fixedid) {
				iph->id = htons(id);
				id += skb_shinfo(skb)->gso_segs;
			}

			if (gso_partial)
				tot_len = skb_shinfo(skb)->gso_size +
					  SKB_GSO_CB(skb)->data_offset +
					  skb->head - (unsigned char *)iph;
			else
				tot_len = skb->len - nhoff;
		} else {
			if (!fixedid)
				iph->id = htons(id++);
			tot_len = skb->len - nhoff;
		}
		iph->tot_len = htons(tot_len);
		ip_send_check(iph);
		if (encap)
			skb_reset_inner_headers(skb);
		skb->network_header = (u8 *)iph - skb->head;
		skb_reset_mac_len(skb);
	} while ((skb = skb->next));

out:
	return segs;
}

static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	const struct iphdr *iph;
	struct sk_buff *p;
	unsigned int hlen;
	unsigned int off;
	unsigned int id;
	int flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	proto = iph->protocol;

	rcu_read_lock();
	ops = rcu_dereference(inet_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive)
		goto out_unlock;

	if (*(u8 *)iph != 0x45)
		goto out_unlock;

	if (ip_is_fragment(iph))
		goto out_unlock;

	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
		goto out_unlock;

	id = ntohl(*(__be32 *)&iph->id);
	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
	id >>= 16;

	list_for_each_entry(p, head, list) {
		struct iphdr *iph2;
		u16 flush_id;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct iphdr *)(p->data + off);
		/* The above works because, with the exception of the top
		 * (innermost) layer, we only aggregate pkts with the same
		 * hdr length so all the hdrs we'll need to verify will start
		 * at the same offset.
		 */
		if ((iph->protocol ^ iph2->protocol) |
		    ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
		    ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* All fields must match except length and checksum. */
		NAPI_GRO_CB(p)->flush |=
			(iph->ttl ^ iph2->ttl) |
			(iph->tos ^ iph2->tos) |
			((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));

		NAPI_GRO_CB(p)->flush |= flush;

		/* We need to store the IP ID check to be included later
		 * when we can verify that this packet does in fact belong
		 * to a given flow.
		 */
		flush_id = (u16)(id - ntohs(iph2->id));

		/* This bit of code makes it much easier for us to identify
		 * the cases where we are doing atomic vs non-atomic IP ID
		 * checks.  Specifically an atomic check can return IP ID
		 * values 0 - 0xFFFF, while a non-atomic check can only
		 * return 0 or 0xFFFF.
		 */
		if (!NAPI_GRO_CB(p)->is_atomic ||
		    !(iph->frag_off & htons(IP_DF))) {
			flush_id ^= NAPI_GRO_CB(p)->count;
			flush_id = flush_id ? 0xFFFF : 0;
		}

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = flush_id;
		else
			NAPI_GRO_CB(p)->flush_id |= flush_id;
	}

	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_set_network_header(skb, off);
	/* The above will be needed by the transport layer if there is one
	 * immediately following this IP hdr.
	 */

	/* Note : No need to call skb_gro_postpull_rcsum() here,
	 * as we already checked checksum over ipv4 header was 0
	 */
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
				       ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static struct sk_buff *ipip_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

#define SECONDS_PER_DAY	86400

/* inet_current_timestamp - Return IP network timestamp
 *
 * Return milliseconds since midnight in network byte order.
 */
__be32 inet_current_timestamp(void)
{
	u32 secs;
	u32 msecs;
	struct timespec64 ts;

	ktime_get_real_ts64(&ts);

	/* Get secs since midnight. */
	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
	/* Convert to msecs. */
	msecs = secs * MSEC_PER_SEC;
	/* Convert nsec to msec. */
	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;

	/* Convert to network byte order. */
	return htonl(msecs);
}
EXPORT_SYMBOL(inet_current_timestamp);
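
/* Worked example: at 01:02:03.004 UTC, secs = 1*3600 + 2*60 + 3 = 3723,
 * so inet_current_timestamp() returns htonl(3723 * 1000 + 4), i.e.
 * htonl(3723004) milliseconds since midnight.
 */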
1607 
inet_recv_error(struct sock * sk,struct msghdr * msg,int len,int * addr_len)1608 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1609 {
1610 	unsigned int family = READ_ONCE(sk->sk_family);
1611 
1612 	if (family == AF_INET)
1613 		return ip_recv_error(sk, msg, len, addr_len);
1614 #if IS_ENABLED(CONFIG_IPV6)
1615 	if (family == AF_INET6)
1616 		return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
1617 #endif
1618 	return -EINVAL;
1619 }
1620 
inet_gro_complete(struct sk_buff * skb,int nhoff)1621 int inet_gro_complete(struct sk_buff *skb, int nhoff)
1622 {
1623 	__be16 newlen = htons(skb->len - nhoff);
1624 	struct iphdr *iph = (struct iphdr *)(skb->data + nhoff);
1625 	const struct net_offload *ops;
1626 	int proto = iph->protocol;
1627 	int err = -ENOSYS;
1628 
1629 	if (skb->encapsulation) {
1630 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
1631 		skb_set_inner_network_header(skb, nhoff);
1632 	}
1633 
1634 	csum_replace2(&iph->check, iph->tot_len, newlen);
1635 	iph->tot_len = newlen;
1636 
1637 	rcu_read_lock();
1638 	ops = rcu_dereference(inet_offloads[proto]);
1639 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
1640 		goto out_unlock;
1641 
1642 	/* Only need to add sizeof(*iph) to get to the next hdr below
1643 	 * because any hdr with option will have been flushed in
1644 	 * inet_gro_receive().
1645 	 */
1646 	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
1647 			      tcp4_gro_complete, udp4_gro_complete,
1648 			      skb, nhoff + sizeof(*iph));
1649 
1650 out_unlock:
1651 	rcu_read_unlock();
1652 
1653 	return err;
1654 }
1655 
ipip_gro_complete(struct sk_buff * skb,int nhoff)1656 static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
1657 {
1658 	skb->encapsulation = 1;
1659 	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
1660 	return inet_gro_complete(skb, nhoff);
1661 }
1662 
inet_ctl_sock_create(struct sock ** sk,unsigned short family,unsigned short type,unsigned char protocol,struct net * net)1663 int inet_ctl_sock_create(struct sock **sk, unsigned short family,
1664 			 unsigned short type, unsigned char protocol,
1665 			 struct net *net)
1666 {
1667 	struct socket *sock;
1668 	int rc = sock_create_kern(net, family, type, protocol, &sock);
1669 
1670 	if (rc == 0) {
1671 		*sk = sock->sk;
1672 		(*sk)->sk_allocation = GFP_ATOMIC;
1673 		/*
1674 		 * Unhash it so that IP input processing does not even see it,
1675 		 * we do not wish this socket to see incoming packets.
1676 		 */
1677 		(*sk)->sk_prot->unhash(*sk);
1678 	}
1679 	return rc;
1680 }
1681 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
1682 
snmp_get_cpu_field(void __percpu * mib,int cpu,int offt)1683 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
1684 {
1685 	return  *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
1686 }
1687 EXPORT_SYMBOL_GPL(snmp_get_cpu_field);
1688 
snmp_fold_field(void __percpu * mib,int offt)1689 unsigned long snmp_fold_field(void __percpu *mib, int offt)
1690 {
1691 	unsigned long res = 0;
1692 	int i;
1693 
1694 	for_each_possible_cpu(i)
1695 		res += snmp_get_cpu_field(mib, i, offt);
1696 	return res;
1697 }
1698 EXPORT_SYMBOL_GPL(snmp_fold_field);
1699 
1700 #if BITS_PER_LONG==32
1701 
snmp_get_cpu_field64(void __percpu * mib,int cpu,int offt,size_t syncp_offset)1702 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offt,
1703 			 size_t syncp_offset)
1704 {
1705 	void *bhptr;
1706 	struct u64_stats_sync *syncp;
1707 	u64 v;
1708 	unsigned int start;
1709 
1710 	bhptr = per_cpu_ptr(mib, cpu);
1711 	syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1712 	do {
1713 		start = u64_stats_fetch_begin_irq(syncp);
1714 		v = *(((u64 *)bhptr) + offt);
1715 	} while (u64_stats_fetch_retry_irq(syncp, start));
1716 
1717 	return v;
1718 }
1719 EXPORT_SYMBOL_GPL(snmp_get_cpu_field64);
1720 
snmp_fold_field64(void __percpu * mib,int offt,size_t syncp_offset)1721 u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_offset)
1722 {
1723 	u64 res = 0;
1724 	int cpu;
1725 
1726 	for_each_possible_cpu(cpu) {
1727 		res += snmp_get_cpu_field64(mib, cpu, offt, syncp_offset);
1728 	}
1729 	return res;
1730 }
1731 EXPORT_SYMBOL_GPL(snmp_fold_field64);
1732 #endif
1733 
1734 #ifdef CONFIG_IP_MULTICAST
1735 static const struct net_protocol igmp_protocol = {
1736 	.handler =	igmp_rcv,
1737 };
1738 #endif
1739 
1740 static const struct net_protocol tcp_protocol = {
1741 	.handler	=	tcp_v4_rcv,
1742 	.err_handler	=	tcp_v4_err,
1743 	.no_policy	=	1,
1744 	.icmp_strict_tag_validation = 1,
1745 };
1746 
1747 static const struct net_protocol udp_protocol = {
1748 	.handler =	udp_rcv,
1749 	.err_handler =	udp_err,
1750 	.no_policy =	1,
1751 };
1752 
1753 static const struct net_protocol icmp_protocol = {
1754 	.handler =	icmp_rcv,
1755 	.err_handler =	icmp_err,
1756 	.no_policy =	1,
1757 };
1758 
ipv4_mib_init_net(struct net * net)1759 static __net_init int ipv4_mib_init_net(struct net *net)
1760 {
1761 	int i;
1762 
1763 	net->mib.tcp_statistics = alloc_percpu(struct tcp_mib);
1764 	if (!net->mib.tcp_statistics)
1765 		goto err_tcp_mib;
1766 	net->mib.ip_statistics = alloc_percpu(struct ipstats_mib);
1767 	if (!net->mib.ip_statistics)
1768 		goto err_ip_mib;
1769 
1770 	for_each_possible_cpu(i) {
1771 		struct ipstats_mib *af_inet_stats;
1772 		af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i);
1773 		u64_stats_init(&af_inet_stats->syncp);
1774 	}
1775 
1776 	net->mib.net_statistics = alloc_percpu(struct linux_mib);
1777 	if (!net->mib.net_statistics)
1778 		goto err_net_mib;
1779 	net->mib.udp_statistics = alloc_percpu(struct udp_mib);
1780 	if (!net->mib.udp_statistics)
1781 		goto err_udp_mib;
1782 	net->mib.udplite_statistics = alloc_percpu(struct udp_mib);
1783 	if (!net->mib.udplite_statistics)
1784 		goto err_udplite_mib;
1785 	net->mib.icmp_statistics = alloc_percpu(struct icmp_mib);
1786 	if (!net->mib.icmp_statistics)
1787 		goto err_icmp_mib;
1788 	net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib),
1789 					      GFP_KERNEL);
1790 	if (!net->mib.icmpmsg_statistics)
1791 		goto err_icmpmsg_mib;
1792 
1793 	tcp_mib_init(net);
1794 	return 0;
1795 
1796 err_icmpmsg_mib:
1797 	free_percpu(net->mib.icmp_statistics);
1798 err_icmp_mib:
1799 	free_percpu(net->mib.udplite_statistics);
1800 err_udplite_mib:
1801 	free_percpu(net->mib.udp_statistics);
1802 err_udp_mib:
1803 	free_percpu(net->mib.net_statistics);
1804 err_net_mib:
1805 	free_percpu(net->mib.ip_statistics);
1806 err_ip_mib:
1807 	free_percpu(net->mib.tcp_statistics);
1808 err_tcp_mib:
1809 	return -ENOMEM;
1810 }

static __net_exit void ipv4_mib_exit_net(struct net *net)
{
	kfree(net->mib.icmpmsg_statistics);
	free_percpu(net->mib.icmp_statistics);
	free_percpu(net->mib.udplite_statistics);
	free_percpu(net->mib.udp_statistics);
	free_percpu(net->mib.net_statistics);
	free_percpu(net->mib.ip_statistics);
	free_percpu(net->mib.tcp_statistics);
#ifdef CONFIG_MPTCP
	/* allocated on demand, see mptcp_init_sock() */
	free_percpu(net->mib.mptcp_statistics);
#endif
}

static __net_initdata struct pernet_operations ipv4_mib_ops = {
	.init = ipv4_mib_init_net,
	.exit = ipv4_mib_exit_net,
};

static int __init init_ipv4_mibs(void)
{
	return register_pernet_subsys(&ipv4_mib_ops);
}
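
/*
 * register_pernet_subsys() runs ipv4_mib_init_net() for the initial
 * network namespace and again for every namespace created later, and
 * invokes ipv4_mib_exit_net() on namespace teardown, so each netns
 * owns (and releases) its own private set of MIB counters.
 */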

static __net_init int inet_init_net(struct net *net)
{
	/*
	 * Set defaults for the local port range.
	 */
	seqlock_init(&net->ipv4.ip_local_ports.lock);
	net->ipv4.ip_local_ports.range[0] =  32768;
	net->ipv4.ip_local_ports.range[1] =  60999;

	seqlock_init(&net->ipv4.ping_group_range.lock);
	/*
	 * Sane defaults - nobody may create ping sockets.
	 * Boot scripts should set this to a distro-specific group.
	 */
	net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1);
	net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0);
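
	/*
	 * Note: gid 1 as the lower bound and gid 0 as the upper bound
	 * is an empty range, which is how "nobody" is expressed here.
	 * Administrators can open it up later, e.g. with something
	 * like the following (an illustrative command, not part of
	 * this file):
	 *	sysctl -w net.ipv4.ping_group_range="100 100"
	 */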

	/* Default values for sysctl-controlled parameters.
	 * We set them here in case sysctl support is not compiled in.
	 */
	net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
	net->ipv4.sysctl_ip_fwd_update_priority = 1;
	net->ipv4.sysctl_ip_dynaddr = 0;
	net->ipv4.sysctl_ip_early_demux = 1;
	net->ipv4.sysctl_udp_early_demux = 1;
	net->ipv4.sysctl_tcp_early_demux = 1;
	net->ipv4.sysctl_nexthop_compat_mode = 1;
#ifdef CONFIG_SYSCTL
	net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
#endif

	/* Some IGMP sysctls, whose values are always used */
	net->ipv4.sysctl_igmp_max_memberships = 20;
	net->ipv4.sysctl_igmp_max_msf = 10;
	/* IGMP reports for link-local multicast groups are enabled by default */
	net->ipv4.sysctl_igmp_llm_reports = 1;
	net->ipv4.sysctl_igmp_qrv = 2;

	net->ipv4.sysctl_fib_notify_on_flag_change = 0;

	return 0;
}

static __net_initdata struct pernet_operations af_inet_ops = {
	.init = inet_init_net,
};

static int __init init_inet_pernet_ops(void)
{
	return register_pernet_subsys(&af_inet_ops);
}

static int ipv4_proc_init(void);

/*
 *	IP protocol layer initialiser
 */

static struct packet_offload ip_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gso_segment = inet_gso_segment,
		.gro_receive = inet_gro_receive,
		.gro_complete = inet_gro_complete,
	},
};
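
/*
 * The offload callbacks work in opposite directions: gso_segment
 * splits an oversized IPv4 skb into MTU-sized segments late on the
 * transmit path, while gro_receive/gro_complete coalesce related
 * incoming packets into one large skb before the stack sees them.
 */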

static const struct net_offload ipip_offload = {
	.callbacks = {
		.gso_segment	= ipip_gso_segment,
		.gro_receive	= ipip_gro_receive,
		.gro_complete	= ipip_gro_complete,
	},
};

static int __init ipip_offload_init(void)
{
	return inet_add_offload(&ipip_offload, IPPROTO_IPIP);
}

static int __init ipv4_offload_init(void)
{
	/*
	 * Add offloads
	 */
	if (udpv4_offload_init() < 0)
		pr_crit("%s: Cannot add UDP protocol offload\n", __func__);
	if (tcpv4_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipip_offload_init() < 0)
		pr_crit("%s: Cannot add IPIP protocol offload\n", __func__);

	dev_add_offload(&ip_packet_offload);
	return 0;
}

fs_initcall(ipv4_offload_init);
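
/*
 * fs_initcall() places ipv4_offload_init() (and inet_init() below)
 * after the core, postcore and subsys initcalls but before the device
 * and late initcalls, so the offload hooks exist by the time network
 * drivers come up.
 */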

static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.func = ip_rcv,
	.list_func = ip_list_rcv,
};
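
/*
 * Registering this packet_type (done near the end of inet_init()
 * below via dev_add_pack()) is what ties the IPv4 stack to the device
 * layer: frames with ethertype ETH_P_IP are handed to ip_rcv(), or in
 * batches to ip_list_rcv() when the driver delivers lists of skbs.
 */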

static int __init inet_init(void)
{
	struct inet_protosw *q;
	struct list_head *r;
	int rc;

	sock_skb_cb_check_size(sizeof(struct inet_skb_parm));

	rc = proto_register(&tcp_prot, 1);
	if (rc)
		goto out;

	rc = proto_register(&udp_prot, 1);
	if (rc)
		goto out_unregister_tcp_proto;

	rc = proto_register(&raw_prot, 1);
	if (rc)
		goto out_unregister_udp_proto;

	rc = proto_register(&ping_prot, 1);
	if (rc)
		goto out_unregister_raw_proto;

	/*
	 *	Tell SOCKET that we are alive...
	 */

	(void)sock_register(&inet_family_ops);

#ifdef CONFIG_SYSCTL
	ip_static_sysctl_init();
#endif

	/*
	 *	Add all the base protocols.
	 */

	if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0)
		pr_crit("%s: Cannot add ICMP protocol\n", __func__);
	if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0)
		pr_crit("%s: Cannot add UDP protocol\n", __func__);
	if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0)
		pr_crit("%s: Cannot add TCP protocol\n", __func__);
#ifdef CONFIG_IP_MULTICAST
	if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0)
		pr_crit("%s: Cannot add IGMP protocol\n", __func__);
#endif

	/* Register the socket-side information for inet_create. */
	for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r)
		INIT_LIST_HEAD(r);

	for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q)
		inet_register_protosw(q);

	/*
	 *	Set the ARP module up
	 */

	arp_init();

	/*
	 *	Set the IP module up
	 */

	ip_init();

	/* Initialise per-cpu ipv4 mibs */
	if (init_ipv4_mibs())
		panic("%s: Cannot init ipv4 mibs\n", __func__);

	/* Setup TCP slab cache for open requests. */
	tcp_init();

	/* Setup UDP memory threshold */
	udp_init();

	/* Add UDP-Lite (RFC 3828) */
	udplite4_register();

	raw_init();

	ping_init();

	/*
	 *	Set the ICMP layer up
	 */

	if (icmp_init() < 0)
		panic("Failed to create the ICMP control socket.\n");

	/*
	 *	Initialise the multicast router
	 */
#if defined(CONFIG_IP_MROUTE)
	if (ip_mr_init())
		pr_crit("%s: Cannot init ipv4 mroute\n", __func__);
#endif

	if (init_inet_pernet_ops())
		pr_crit("%s: Cannot init ipv4 inet pernet ops\n", __func__);

	ipv4_proc_init();

	ipfrag_init();

	dev_add_pack(&ip_packet_type);

	ip_tunnel_core_init();

	rc = 0;
out:
	return rc;
out_unregister_raw_proto:
	proto_unregister(&raw_prot);
out_unregister_udp_proto:
	proto_unregister(&udp_prot);
out_unregister_tcp_proto:
	proto_unregister(&tcp_prot);
	goto out;
}

fs_initcall(inet_init);
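
/*
 * A rough reading of inet_init() above: the transport protocols are
 * registered with the core socket layer first, the PF_INET family is
 * hooked in via sock_register(), the L4 receive handlers are attached,
 * and only after ARP, IP and the per-protocol subsystems are up does
 * dev_add_pack() let IPv4 frames start flowing into ip_rcv().
 */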

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_PROC_FS
static int __init ipv4_proc_init(void)
{
	int rc = 0;

	if (raw_proc_init())
		goto out_raw;
	if (tcp4_proc_init())
		goto out_tcp;
	if (udp4_proc_init())
		goto out_udp;
	if (ping_proc_init())
		goto out_ping;
	if (ip_misc_proc_init())
		goto out_misc;
out:
	return rc;
out_misc:
	ping_proc_exit();
out_ping:
	udp4_proc_exit();
out_udp:
	tcp4_proc_exit();
out_tcp:
	raw_proc_exit();
out_raw:
	rc = -ENOMEM;
	goto out;
}
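
/*
 * As in ipv4_mib_init_net() above, the labels unwind in reverse
 * registration order: a failure at any step tears down exactly the
 * /proc entries that were already created and reports -ENOMEM.
 */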

#else /* CONFIG_PROC_FS */
static int __init ipv4_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */