// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_inuse_add(struct net *net, int val);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * user namespace @user_ns when the socket was created, and whether the
 * current process has it there too.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);
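/*
 * Usage sketch (hypothetical caller, not from this file): gating a
 * privileged operation on a socket would look like
 *
 *	if (!sk_ns_capable(sk, sock_net(sk)->user_ns, CAP_NET_ADMIN))
 *		return -EPERM;
 *
 * Both halves must hold: the socket's opener had the capability when the
 * socket was created, and the current task holds it now.
 */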

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap in the
 * initial user namespace (i.e. globally) when the socket was created, and
 * whether the current process has it too.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap over
 * the network namespace the socket is a member of when the socket was
 * created, and whether the current process has it there too.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"	,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"	,	x "AF_XDP"	, \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);
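/*
 * Worked example of the default above (assuming UIO_MAXIOV == 1024): on a
 * 64-bit build this is 8 * (2 * 1024 + 512) = 20480 bytes; on 32-bit,
 * 4 * 2560 = 10240 bytes. Tunable at runtime via
 * /proc/sys/net/core/optmem_max.
 */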

int sysctl_tstamp_allow_data __read_mostly = 1;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);
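/*
 * Example (a sketch, not from this file): a driver backing swap over the
 * network would typically flag its transport socket once the swap device
 * is activated,
 *
 *	sk_set_memalloc(sock->sk);
 *
 * and undo it with sk_clear_memalloc() when the device is deactivated.
 */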

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = sk->sk_backlog_rcv(sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		return sizeof(old_tv);
	}

	*(struct __kernel_sock_timeval *)optval = tv;
	return sizeof(tv);
}

static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
			    bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv.tv_sec = tv32.tv_sec;
		tv.tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv.tv_sec = old_tv.tv_sec;
		tv.tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(tv))
			return -EINVAL;
		if (copy_from_sockptr(&tv, optval, sizeof(tv)))
			return -EFAULT;
	}
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))
		*timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ);
	return 0;
}
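/*
 * Worked example of the conversion above: with HZ == 1000, a user timeval
 * of { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2 * 1000 + DIV_ROUND_UP(500000, 1000000 / 1000) = 2500 jiffies, while
 * { 0, 0 } selects MAX_SCHEDULE_TIMEOUT, i.e. block forever.
 */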

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* We escape from the RCU-protected region here; make sure we
	 * don't leak a non-refcounted dst.
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sk_filter(sk, skb);
	if (err)
		return err;

	return __sock_queue_rcv_skb(sk, skb);
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
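/*
 * Usage sketch (hypothetical protocol receive path): datagram protocols
 * typically hand validated skbs to this helper and count failures as drops:
 *
 *	err = sock_queue_rcv_skb(sk, skb);
 *	if (err < 0)
 *		kfree_skb(skb);
 *
 * -ENOMEM means the receive queue is full, -ENOBUFS that the rmem charge
 * failed, and a negative sk_filter() result that an attached filter
 * rejected the skb.
 */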

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_pending_confirm = 0;
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
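/*
 * Usage sketch (an assumption mirroring typical transmit paths, not code
 * from this file): callers revalidate the cached route before each use and
 * re-route when the check fails:
 *
 *	dst = sk_dst_check(sk, 0);
 *	if (!dst)
 *		dst = reroute_and_cache(sk);	(hypothetical helper)
 */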

static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	sk->sk_bound_dev_if = ifindex;
	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
{
	int ret;

	if (lock_sk)
		lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, ifindex);
	if (lock_sk)
		release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(sock_bindtoindex);
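/*
 * Example (a sketch): a caller that has resolved a device would bind with
 *
 *	err = sock_bindtoindex(sk, dev->ifindex, true);
 *
 * passing lock_sk == false only when it already holds the socket lock.
 * Binding to index 0 removes an existing device binding.
 */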

static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_sockptr(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	return sock_bindtoindex(sk, index, true);
out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

bool sk_mc_loop(struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	switch (sk->sk_family) {
	case AF_INET:
		return inet_sk(sk)->mc_loop;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_sk(sk)->mc_loop;
#endif
	}
	WARN_ON_ONCE(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

void sock_set_reuseaddr(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuse = SK_CAN_REUSE;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseaddr);

void sock_set_reuseport(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuseport = true;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseport);

void sock_no_linger(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_lingertime = 0;
	sock_set_flag(sk, SOCK_LINGER);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_no_linger);

void sock_set_priority(struct sock *sk, u32 priority)
{
	lock_sock(sk);
	sk->sk_priority = priority;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_priority);

void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
	lock_sock(sk);
	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
		sk->sk_sndtimeo = secs * HZ;
	else
		sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);

static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{
	if (val)  {
		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
		sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns);
		sock_set_flag(sk, SOCK_RCVTSTAMP);
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	} else {
		sock_reset_flag(sk, SOCK_RCVTSTAMP);
		sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
	}
}

void sock_enable_timestamps(struct sock *sk)
{
	lock_sock(sk);
	__sock_set_timestamps(sk, true, false, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_enable_timestamps);

void sock_set_keepalive(struct sock *sk)
{
	lock_sock(sk);
	if (sk->sk_prot->keepalive)
		sk->sk_prot->keepalive(sk, true);
	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_keepalive);

static void __sock_set_rcvbuf(struct sock *sk, int val)
{
	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
	 * as a negative value.
	 */
	val = min_t(int, val, INT_MAX / 2);
	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;

	/* We double it on the way in to account for "struct sk_buff" etc.
	 * overhead.   Applications assume that the SO_RCVBUF setting they make
	 * will allow that much actual data to be received on that socket.
	 *
	 * Applications are unaware that "struct sk_buff" and other overheads
	 * allocate from the receive buffer during socket buffer allocation.
	 *
	 * And after considering the possible alternatives, returning the value
	 * we actually used in getsockopt is the most desirable behavior.
	 */
	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
}
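/*
 * Worked example of the doubling: setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
 * &val, sizeof(val)) with val == 65536 stores sk_rcvbuf = 131072, and a
 * subsequent getsockopt(SO_RCVBUF) reports 131072, never less than
 * SOCK_MIN_RCVBUF.
 */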

void sock_set_rcvbuf(struct sock *sk, int val)
{
	lock_sock(sk);
	__sock_set_rcvbuf(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_rcvbuf);

static void __sock_set_mark(struct sock *sk, u32 val)
{
	if (val != sk->sk_mark) {
		sk->sk_mark = val;
		sk_dst_reset(sk);
	}
}

void sock_set_mark(struct sock *sk, u32 val)
{
	lock_sock(sk);
	__sock_set_mark(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_mark);

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    sockptr_t optval, unsigned int optlen)
{
	struct sock_txtime sk_txtime;
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		WRITE_ONCE(sk->sk_sndbuf,
			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		__sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max));
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		__sock_set_rcvbuf(sk, max(val, 0));
		break;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP_OLD:
		__sock_set_timestamps(sk, valbool, false, false);
		break;
	case SO_TIMESTAMP_NEW:
		__sock_set_timestamps(sk, valbool, true, false);
		break;
	case SO_TIMESTAMPNS_OLD:
		__sock_set_timestamps(sk, valbool, false, true);
		break;
	case SO_TIMESTAMPNS_NEW:
		__sock_set_timestamps(sk, valbool, true, true);
		break;
	case SO_TIMESTAMPING_NEW:
	case SO_TIMESTAMPING_OLD:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}

		if (val & SOF_TIMESTAMPING_OPT_ID &&
		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
			if (sk->sk_protocol == IPPROTO_TCP &&
			    sk->sk_type == SOCK_STREAM) {
				if ((1 << sk->sk_state) &
				    (TCPF_CLOSE | TCPF_LISTEN)) {
					ret = -EINVAL;
					break;
				}
				sk->sk_tskey = tcp_sk(sk)->snd_una;
			} else {
				sk->sk_tskey = 0;
			}
		}

		if (val & SOF_TIMESTAMPING_OPT_STATS &&
		    !(val & SOF_TIMESTAMPING_OPT_TSONLY)) {
			ret = -EINVAL;
			break;
		}

		sk->sk_tsflags = val;
		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);

		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		if (sock->ops->set_rcvlowat)
			ret = sock->ops->set_rcvlowat(sk, val);
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
				       optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
				       optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_reuseport_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_REUSEPORT_BPF:
		ret = reuseport_detach_prog(sk);
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		__sock_set_mark(sk, val);
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			ret = sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				WRITE_ONCE(sk->sk_ll_usec, val);
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		{
		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
			ret = -EFAULT;
			break;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		sk->sk_max_pacing_rate = ulval;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval);
		break;
		}
	case SO_INCOMING_CPU:
		WRITE_ONCE(sk->sk_incoming_cpu, val);
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!((sk->sk_type == SOCK_STREAM &&
			       sk->sk_protocol == IPPROTO_TCP) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -ENOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -ENOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
			break;
		} else if (copy_from_sockptr(&sk_txtime, optval,
			   sizeof(struct sock_txtime))) {
			ret = -EFAULT;
			break;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
			break;
		}
		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
		 * scheduler has enough safeguards.
		 */
		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
		    !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		sock_valbool_flag(sk, SOCK_TXTIME, true);
		sk->sk_clockid = sk_txtime.clockid;
		sk->sk_txtime_deadline_mode =
			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
		sk->sk_txtime_report_errors =
			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_bindtoindex_locked(sk, val);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
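/*
 * Usage sketch from userspace (assumption: a plain AF_INET socket); every
 * SOL_SOCKET option above funnels into sock_setsockopt():
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
 *
 * Options shorter than sizeof(int) are rejected with -EINVAL before the
 * switch is reached (SO_BINDTODEVICE excepted, as it is handled first).
 */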

static const struct cred *sk_get_peer_cred(struct sock *sk)
{
	const struct cred *cred;

	spin_lock(&sk->sk_peer_lock);
	cred = get_cred(sk->sk_peer_cred);
	spin_unlock(&sk->sk_peer_lock);

	return cred;
}

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(gid_t __user *dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++)
		if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i))
			return -EFAULT;

	return 0;
}

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct  __kernel_sock_timeval stm;
		struct sock_txtime txtime;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
		v.val = sk->sk_tsflags;
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname);
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);

		spin_lock(&sk->sk_peer_lock);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		spin_unlock(&sk->sk_peer_lock);

		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERGROUPS:
	{
		const struct cred *cred;
		int ret, n;

		cred = sk_get_peer_cred(sk);
		if (!cred)
			return -ENODATA;

		n = cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			put_cred(cred);
			return put_user(len, optlen) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user((gid_t __user *)optval, cred->group_info);
		put_cred(cred);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		lv = sock->ops->getname(sock, (struct sockaddr *)address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
			lv = sizeof(v.ulval);
			v.ulval = sk->sk_max_pacing_rate;
		} else {
			/* 32bit version */
			v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U);
		}
		break;

	case SO_INCOMING_CPU:
		v.val = READ_ONCE(sk->sk_incoming_cpu);
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_user(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (v.val < MIN_NAPI_ID)
			v.val = 0;

		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
		break;

	case SO_BINDTOIFINDEX:
		v.val = sk->sk_bound_dev_if;
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
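/*
 * Worked example of the length contract above: for SO_LINGER, lv is
 * sizeof(struct linger) (8 bytes on common ABIs). A caller passing
 * *optlen == 4 gets only the first field (l_onoff) copied back and
 * *optlen rewritten to 4; a caller passing 16 gets 8 bytes back and
 * *optlen == 8.
 */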

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk, but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as-is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
	const struct proto *prot = READ_ONCE(osk->sk_prot);
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (want_init_on_alloc(priority))
			sk_prot_clear_nulls(sk, prot->obj_size);
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	cgroup_sk_free(&sk->sk_cgrp_data);
	mem_cgroup_sk_free(sk);
	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 *	@kern: is this to be a kernel socket?
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot, int kern)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sk->sk_kern_sock = kern;
		sock_lock_init(sk);
		sk->sk_net_refcnt = kern ? 0 : 1;
		if (likely(sk->sk_net_refcnt)) {
			get_net(net);
			sock_inuse_add(net, 1);
		}

		sock_net_set(sk, net);
		refcount_set(&sk->sk_wmem_alloc, 1);

		mem_cgroup_sk_alloc(sk);
		cgroup_sk_alloc(&sk->sk_cgrp_data);
		sock_update_classid(&sk->sk_cgrp_data);
		sock_update_netprioidx(&sk->sk_cgrp_data);
		sk_tx_queue_clear(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
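/*
 * Usage sketch (simplified from what a typical family create hook does,
 * e.g. an inet-style create routine):
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot, kern);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *
 * "answer_prot" stands in for whichever struct proto was chosen for the
 * requested type/protocol pair.
 */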

/* Sockets having SOCK_RCU_FREE will call this function after one RCU
 * grace period. This is the case for UDP sockets and TCP listeners.
 */
static void __sk_destruct(struct rcu_head *head)
{
	struct sock *sk = container_of(head, struct sock, sk_rcu);
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       refcount_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

#ifdef CONFIG_BPF_SYSCALL
	bpf_sk_storage_free(sk);
#endif

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	/* We do not need to acquire sk->sk_peer_lock, we are the last user. */
	put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);

	if (likely(sk->sk_net_refcnt))
		put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_destruct(struct sock *sk)
{
	bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);

	if (rcu_access_pointer(sk->sk_reuseport_cb)) {
		reuseport_detach_sock(sk);
		use_call_rcu = true;
	}

	if (use_call_rcu)
		call_rcu(&sk->sk_rcu, __sk_destruct);
	else
		__sk_destruct(&sk->sk_rcu);
}

static void __sk_free(struct sock *sk)
{
	if (likely(sk->sk_net_refcnt))
		sock_inuse_add(sock_net(sk), -1);

	if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
		sock_diag_broadcast_destroy(sk);
	else
		sk_destruct(sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc; while some packets are still
	 * in a tx queue the count stays non-zero, and in that case
	 * sock_wfree() will call __sk_free(sk) later.
	 */
	if (refcount_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

static void sk_init_common(struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);

	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_receive_queue.lock,
			af_rlock_keys + sk->sk_family,
			af_family_rlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_write_queue.lock,
			af_wlock_keys + sk->sk_family,
			af_family_wlock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_error_queue.lock,
			af_elock_keys + sk->sk_family,
			af_family_elock_key_strings[sk->sk_family]);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct proto *prot = READ_ONCE(sk->sk_prot);
	struct sock *newsk;
	bool is_charged = true;

	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		newsk->sk_prot_creator = prot;

		/* SANITY */
		if (likely(newsk->sk_net_refcnt))
			get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		refcount_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		sk_init_common(newsk);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_dst_pending_confirm = 0;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		atomic_set(&newsk->sk_drops, 0);
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
		atomic_set(&newsk->sk_zckey, 0);

		sock_reset_flag(newsk, SOCK_DONE);

		/* sk->sk_memcg will be populated at accept() time */
		newsk->sk_memcg = NULL;

		cgroup_sk_clone(&newsk->sk_cgrp_data);

		rcu_read_lock();
		filter = rcu_dereference(sk->sk_filter);
		if (filter != NULL)
			/* though it's an empty new sock, the charging may fail
			 * if sysctl_optmem_max was changed between creation of
			 * original socket and cloning
			 */
			is_charged = sk_filter_charge(newsk, filter);
		RCU_INIT_POINTER(newsk->sk_filter, filter);
		rcu_read_unlock();

		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
			/* We need to make sure that we don't uncharge the new
			 * socket if we couldn't charge it in the first place
			 * as otherwise we uncharge the parent's filter.
			 */
			if (!is_charged)
				RCU_INIT_POINTER(newsk->sk_filter, NULL);
			sk_free_unlock_clone(newsk);
			newsk = NULL;
			goto out;
		}
		RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL);

		if (bpf_sk_storage_clone(sk, newsk)) {
			sk_free_unlock_clone(newsk);
			newsk = NULL;
			goto out;
		}

		/* Clear sk_user_data if parent had the pointer tagged
		 * as not suitable for copying when cloning.
		 */
		if (sk_user_data_is_nocopy(newsk))
			newsk->sk_user_data = NULL;

		newsk->sk_err	   = 0;
		newsk->sk_err_soft = 0;
		newsk->sk_priority = 0;
		newsk->sk_incoming_cpu = raw_smp_processor_id();
		if (likely(newsk->sk_net_refcnt))
			sock_inuse_add(sock_net(newsk), 1);

		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.rst for details)
		 */
		smp_wmb();
		refcount_set(&newsk->sk_refcnt, 2);

1980 		/*
1981 		 * Increment the counter in the same struct proto as the master
1982 		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
1983 		 * is the same as sk->sk_prot->socks, as this field was copied
1984 		 * with memcpy).
1985 		 *
1986 		 * This _changes_ the previous behaviour, where
1987 		 * tcp_create_openreq_child was always incrementing the
1988 		 * equivalent of tcp_prot->socks (inet_sock_nr), so this has
1989 		 * to be taken into account in all callers. -acme
1990 		 */
1991 		sk_refcnt_debug_inc(newsk);
1992 		sk_set_socket(newsk, NULL);
1993 		sk_tx_queue_clear(newsk);
1994 		RCU_INIT_POINTER(newsk->sk_wq, NULL);
1995 
1996 		if (newsk->sk_prot->sockets_allocated)
1997 			sk_sockets_allocated_inc(newsk);
1998 
1999 		if (sock_needs_netstamp(sk) &&
2000 		    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
2001 			net_enable_timestamp();
2002 	}
2003 out:
2004 	return newsk;
2005 }
2006 EXPORT_SYMBOL_GPL(sk_clone_lock);
2007 
2008 void sk_free_unlock_clone(struct sock *sk)
2009 {
2010 	/* It is still a raw copy of the parent, so invalidate
2011 	 * the destructor and do a plain sk_free() */
2012 	sk->sk_destruct = NULL;
2013 	bh_unlock_sock(sk);
2014 	sk_free(sk);
2015 }
2016 EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
2017 
2018 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
2019 {
2020 	u32 max_segs = 1;
2021 
2022 	sk_dst_set(sk, dst);
2023 	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
2024 	if (sk->sk_route_caps & NETIF_F_GSO)
2025 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
2026 	sk->sk_route_caps &= ~sk->sk_route_nocaps;
2027 	if (sk_can_gso(sk)) {
2028 		if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
2029 			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
2030 		} else {
2031 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
2032 			sk->sk_gso_max_size = dst->dev->gso_max_size;
2033 			max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
2034 		}
2035 	}
2036 	sk->sk_gso_max_segs = max_segs;
2037 }
2038 EXPORT_SYMBOL_GPL(sk_setup_caps);
2039 
2040 /*
2041  *	Simple resource managers for sockets.
2042  */
2043 
2044 
2045 /*
2046  * Write buffer destructor automatically called from kfree_skb.
2047  */
2048 void sock_wfree(struct sk_buff *skb)
2049 {
2050 	struct sock *sk = skb->sk;
2051 	unsigned int len = skb->truesize;
2052 
2053 	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
2054 		/*
2055 		 * Keep a reference on sk_wmem_alloc; it will be released
2056 		 * after the sk_write_space() call.
2057 		 */
2058 		WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
2059 		sk->sk_write_space(sk);
2060 		len = 1;
2061 	}
2062 	/*
2063 	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
2064 	 * could not do because of in-flight packets
2065 	 */
2066 	if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
2067 		__sk_free(sk);
2068 }
2069 EXPORT_SYMBOL(sock_wfree);
2070 
2071 /* This variant of sock_wfree() is used by TCP,
2072  * since it sets SOCK_USE_WRITE_QUEUE.
2073  */
2074 void __sock_wfree(struct sk_buff *skb)
2075 {
2076 	struct sock *sk = skb->sk;
2077 
2078 	if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
2079 		__sk_free(sk);
2080 }
2081 
2082 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
2083 {
2084 	skb_orphan(skb);
2085 	skb->sk = sk;
2086 #ifdef CONFIG_INET
2087 	if (unlikely(!sk_fullsock(sk))) {
2088 		skb->destructor = sock_edemux;
2089 		sock_hold(sk);
2090 		return;
2091 	}
2092 #endif
2093 	skb->destructor = sock_wfree;
2094 	skb_set_hash_from_sk(skb, sk);
2095 	/*
2096 	 * We used to take a refcount on sk, but the following operation
2097 	 * is enough to guarantee sk_free() won't free this sock until
2098 	 * all in-flight packets are completed.
2099 	 */
2100 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
2101 }
2102 EXPORT_SYMBOL(skb_set_owner_w);
2103 
2104 static bool can_skb_orphan_partial(const struct sk_buff *skb)
2105 {
2106 #ifdef CONFIG_TLS_DEVICE
2107 	/* Drivers depend on in-order delivery for crypto offload,
2108 	 * partial orphan breaks out-of-order-OK logic.
2109 	 */
2110 	if (skb->decrypted)
2111 		return false;
2112 #endif
2113 	return (skb->destructor == sock_wfree ||
2114 		(IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
2115 }
2116 
2117 /* This helper is used by netem, as it can hold packets in its
2118  * delay queue. We want to allow the owner socket to send more
2119  * packets, as if they were already TX completed by a typical driver.
2120  * But we also want to keep skb->sk set because some packet schedulers
2121  * rely on it (sch_fq for example).
2122  */
2123 void skb_orphan_partial(struct sk_buff *skb)
2124 {
2125 	if (skb_is_tcp_pure_ack(skb))
2126 		return;
2127 
2128 	if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk))
2129 		return;
2130 
2131 	skb_orphan(skb);
2132 }
2133 EXPORT_SYMBOL(skb_orphan_partial);
2134 
2135 /*
2136  * Read buffer destructor automatically called from kfree_skb.
2137  */
2138 void sock_rfree(struct sk_buff *skb)
2139 {
2140 	struct sock *sk = skb->sk;
2141 	unsigned int len = skb->truesize;
2142 
2143 	atomic_sub(len, &sk->sk_rmem_alloc);
2144 	sk_mem_uncharge(sk, len);
2145 }
2146 EXPORT_SYMBOL(sock_rfree);
2147 
2148 /*
2149  * Buffer destructor for skbs that are not used directly in read or write
2150  * path, e.g. for error handler skbs. Automatically called from kfree_skb.
2151  */
2152 void sock_efree(struct sk_buff *skb)
2153 {
2154 	sock_put(skb->sk);
2155 }
2156 EXPORT_SYMBOL(sock_efree);
2157 
2158 /* Buffer destructor for prefetch/receive path where reference count may
2159  * not be held, e.g. for listen sockets.
2160  */
2161 #ifdef CONFIG_INET
2162 void sock_pfree(struct sk_buff *skb)
2163 {
2164 	if (sk_is_refcounted(skb->sk))
2165 		sock_gen_put(skb->sk);
2166 }
2167 EXPORT_SYMBOL(sock_pfree);
2168 #endif /* CONFIG_INET */
2169 
2170 kuid_t sock_i_uid(struct sock *sk)
2171 {
2172 	kuid_t uid;
2173 
2174 	read_lock_bh(&sk->sk_callback_lock);
2175 	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2176 	read_unlock_bh(&sk->sk_callback_lock);
2177 	return uid;
2178 }
2179 EXPORT_SYMBOL(sock_i_uid);
2180 
2181 unsigned long sock_i_ino(struct sock *sk)
2182 {
2183 	unsigned long ino;
2184 
2185 	read_lock_bh(&sk->sk_callback_lock);
2186 	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2187 	read_unlock_bh(&sk->sk_callback_lock);
2188 	return ino;
2189 }
2190 EXPORT_SYMBOL(sock_i_ino);
2191 
2192 /*
2193  * Allocate an skb from the socket's send buffer.
2194  */
2195 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2196 			     gfp_t priority)
2197 {
2198 	if (force ||
2199 	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2200 		struct sk_buff *skb = alloc_skb(size, priority);
2201 
2202 		if (skb) {
2203 			skb_set_owner_w(skb, sk);
2204 			return skb;
2205 		}
2206 	}
2207 	return NULL;
2208 }
2209 EXPORT_SYMBOL(sock_wmalloc);
2210 
2211 static void sock_ofree(struct sk_buff *skb)
2212 {
2213 	struct sock *sk = skb->sk;
2214 
2215 	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2216 }
2217 
2218 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2219 			     gfp_t priority)
2220 {
2221 	struct sk_buff *skb;
2222 
2223 	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2224 	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2225 	    sysctl_optmem_max)
2226 		return NULL;
2227 
2228 	skb = alloc_skb(size, priority);
2229 	if (!skb)
2230 		return NULL;
2231 
2232 	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2233 	skb->sk = sk;
2234 	skb->destructor = sock_ofree;
2235 	return skb;
2236 }
2237 
2238 /*
2239  * Allocate a memory block from the socket's option memory buffer.
2240  */
2241 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2242 {
2243 	if ((unsigned int)size <= sysctl_optmem_max &&
2244 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2245 		void *mem;
2246 		/* First do the add, to avoid the race if kmalloc
2247 		 * might sleep.
2248 		 */
2249 		atomic_add(size, &sk->sk_omem_alloc);
2250 		mem = kmalloc(size, priority);
2251 		if (mem)
2252 			return mem;
2253 		atomic_sub(size, &sk->sk_omem_alloc);
2254 	}
2255 	return NULL;
2256 }
2257 EXPORT_SYMBOL(sock_kmalloc);
2258 
2259 /* Free an option memory block. Note, we actually want the inline
2260  * here as this allows gcc to detect the nullify and fold away the
2261  * condition entirely.
2262  */
2263 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2264 				  const bool nullify)
2265 {
2266 	if (WARN_ON_ONCE(!mem))
2267 		return;
2268 	if (nullify)
2269 		kfree_sensitive(mem);
2270 	else
2271 		kfree(mem);
2272 	atomic_sub(size, &sk->sk_omem_alloc);
2273 }
2274 
2275 void sock_kfree_s(struct sock *sk, void *mem, int size)
2276 {
2277 	__sock_kfree_s(sk, mem, size, false);
2278 }
2279 EXPORT_SYMBOL(sock_kfree_s);
2280 
2281 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2282 {
2283 	__sock_kfree_s(sk, mem, size, true);
2284 }
2285 EXPORT_SYMBOL(sock_kzfree_s);
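
/* Illustrative sketch: sock_kmalloc() charges the caller-supplied size
 * to sk_omem_alloc and sock_kfree_s() uncharges the same size, so both
 * calls must agree on it. Hypothetical usage, for exposition only:
 */
static inline int example_stash_opt(struct sock *sk, const void *src, int len)
{
	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!buf)
		return -ENOBUFS;
	memcpy(buf, src, len);
	/* ... use buf ... */
	sock_kfree_s(sk, buf, len);	/* uncharge exactly len bytes */
	return 0;
}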
2286 
2287 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
2288    I think these locks should be removed for datagram sockets.
2289  */
2290 static long sock_wait_for_wmem(struct sock *sk, long timeo)
2291 {
2292 	DEFINE_WAIT(wait);
2293 
2294 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2295 	for (;;) {
2296 		if (!timeo)
2297 			break;
2298 		if (signal_pending(current))
2299 			break;
2300 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2301 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2302 		if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
2303 			break;
2304 		if (sk->sk_shutdown & SEND_SHUTDOWN)
2305 			break;
2306 		if (sk->sk_err)
2307 			break;
2308 		timeo = schedule_timeout(timeo);
2309 	}
2310 	finish_wait(sk_sleep(sk), &wait);
2311 	return timeo;
2312 }
2313 
2314 
2315 /*
2316  *	Generic send/receive buffer handlers
2317  */
2318 
2319 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
2320 				     unsigned long data_len, int noblock,
2321 				     int *errcode, int max_page_order)
2322 {
2323 	struct sk_buff *skb;
2324 	long timeo;
2325 	int err;
2326 
2327 	timeo = sock_sndtimeo(sk, noblock);
2328 	for (;;) {
2329 		err = sock_error(sk);
2330 		if (err != 0)
2331 			goto failure;
2332 
2333 		err = -EPIPE;
2334 		if (sk->sk_shutdown & SEND_SHUTDOWN)
2335 			goto failure;
2336 
2337 		if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
2338 			break;
2339 
2340 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2341 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2342 		err = -EAGAIN;
2343 		if (!timeo)
2344 			goto failure;
2345 		if (signal_pending(current))
2346 			goto interrupted;
2347 		timeo = sock_wait_for_wmem(sk, timeo);
2348 	}
2349 	skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
2350 				   errcode, sk->sk_allocation);
2351 	if (skb)
2352 		skb_set_owner_w(skb, sk);
2353 	return skb;
2354 
2355 interrupted:
2356 	err = sock_intr_errno(timeo);
2357 failure:
2358 	*errcode = err;
2359 	return NULL;
2360 }
2361 EXPORT_SYMBOL(sock_alloc_send_pskb);
2362 
2363 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
2364 				    int noblock, int *errcode)
2365 {
2366 	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
2367 }
2368 EXPORT_SYMBOL(sock_alloc_send_skb);
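
/* Illustrative sketch: a datagram protocol's sendmsg() path would
 * typically call sock_alloc_send_skb() like this — blocking (subject
 * to the socket's send timeout) until write space is available, with
 * the error delivered through *err. The helper and the MAX_HEADER
 * headroom allowance are hypothetical, for exposition only.
 */
static inline struct sk_buff *example_dgram_alloc(struct sock *sk,
						  size_t len, int flags,
						  int *err)
{
	return sock_alloc_send_skb(sk, len + MAX_HEADER,
				   flags & MSG_DONTWAIT, err);
}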
2369 
2370 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
2371 		     struct sockcm_cookie *sockc)
2372 {
2373 	u32 tsflags;
2374 
2375 	switch (cmsg->cmsg_type) {
2376 	case SO_MARK:
2377 		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
2378 			return -EPERM;
2379 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2380 			return -EINVAL;
2381 		sockc->mark = *(u32 *)CMSG_DATA(cmsg);
2382 		break;
2383 	case SO_TIMESTAMPING_OLD:
2384 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
2385 			return -EINVAL;
2386 
2387 		tsflags = *(u32 *)CMSG_DATA(cmsg);
2388 		if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
2389 			return -EINVAL;
2390 
2391 		sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
2392 		sockc->tsflags |= tsflags;
2393 		break;
2394 	case SCM_TXTIME:
2395 		if (!sock_flag(sk, SOCK_TXTIME))
2396 			return -EINVAL;
2397 		if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
2398 			return -EINVAL;
2399 		sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
2400 		break;
2401 	/* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
2402 	case SCM_RIGHTS:
2403 	case SCM_CREDENTIALS:
2404 		break;
2405 	default:
2406 		return -EINVAL;
2407 	}
2408 	return 0;
2409 }
2410 EXPORT_SYMBOL(__sock_cmsg_send);
2411 
2412 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2413 		   struct sockcm_cookie *sockc)
2414 {
2415 	struct cmsghdr *cmsg;
2416 	int ret;
2417 
2418 	for_each_cmsghdr(cmsg, msg) {
2419 		if (!CMSG_OK(msg, cmsg))
2420 			return -EINVAL;
2421 		if (cmsg->cmsg_level != SOL_SOCKET)
2422 			continue;
2423 		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2424 		if (ret)
2425 			return ret;
2426 	}
2427 	return 0;
2428 }
2429 EXPORT_SYMBOL(sock_cmsg_send);
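
/* Illustrative sketch of the user-space counterpart that the parser
 * above accepts: a per-sendmsg() SO_MARK carried as SOL_SOCKET
 * ancillary data (needs CAP_NET_ADMIN). Values are hypothetical.
 *
 *	char cbuf[CMSG_SPACE(sizeof(uint32_t))] = {};
 *	struct msghdr msg = { .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);
 *
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type  = SO_MARK;
 *	cm->cmsg_len   = CMSG_LEN(sizeof(uint32_t));
 *	*(uint32_t *)CMSG_DATA(cm) = 42;	(example mark value)
 *	(set up msg_iov, then: sendmsg(fd, &msg, 0);)
 */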
2430 
2431 static void sk_enter_memory_pressure(struct sock *sk)
2432 {
2433 	if (!sk->sk_prot->enter_memory_pressure)
2434 		return;
2435 
2436 	sk->sk_prot->enter_memory_pressure(sk);
2437 }
2438 
2439 static void sk_leave_memory_pressure(struct sock *sk)
2440 {
2441 	if (sk->sk_prot->leave_memory_pressure) {
2442 		sk->sk_prot->leave_memory_pressure(sk);
2443 	} else {
2444 		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2445 
2446 		if (memory_pressure && READ_ONCE(*memory_pressure))
2447 			WRITE_ONCE(*memory_pressure, 0);
2448 	}
2449 }
2450 
2451 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2452 
2453 /**
2454  * skb_page_frag_refill - check that a page_frag contains enough room
2455  * @sz: minimum size of the fragment we want to get
2456  * @pfrag: pointer to page_frag
2457  * @gfp: priority for memory allocation
2458  *
2459  * Note: While this allocator tries to use high order pages, there is
2460  * no guarantee that allocations succeed. Therefore, @sz MUST be
2461  * less than or equal to PAGE_SIZE.
2462  */
2463 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2464 {
2465 	if (pfrag->page) {
2466 		if (page_ref_count(pfrag->page) == 1) {
2467 			pfrag->offset = 0;
2468 			return true;
2469 		}
2470 		if (pfrag->offset + sz <= pfrag->size)
2471 			return true;
2472 		put_page(pfrag->page);
2473 	}
2474 
2475 	pfrag->offset = 0;
2476 	if (SKB_FRAG_PAGE_ORDER &&
2477 	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2478 		/* Avoid direct reclaim but allow kswapd to wake */
2479 		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2480 					  __GFP_COMP | __GFP_NOWARN |
2481 					  __GFP_NORETRY,
2482 					  SKB_FRAG_PAGE_ORDER);
2483 		if (likely(pfrag->page)) {
2484 			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2485 			return true;
2486 		}
2487 	}
2488 	pfrag->page = alloc_page(gfp);
2489 	if (likely(pfrag->page)) {
2490 		pfrag->size = PAGE_SIZE;
2491 		return true;
2492 	}
2493 	return false;
2494 }
2495 EXPORT_SYMBOL(skb_page_frag_refill);
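
/* Illustrative sketch: a typical skb_page_frag_refill() consumer makes
 * sure the fragment has room, copies in, then advances the offset.
 * The helper is hypothetical, for exposition only; callers that keep
 * the data around would also take a page reference. Note the request
 * must not exceed PAGE_SIZE, per the comment above.
 */
static inline bool example_frag_copy(struct page_frag *pfrag,
				     const void *src, unsigned int len,
				     gfp_t gfp)
{
	if (len > PAGE_SIZE || !skb_page_frag_refill(len, pfrag, gfp))
		return false;
	memcpy(page_address(pfrag->page) + pfrag->offset, src, len);
	pfrag->offset += len;
	return true;
}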
2496 
2497 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2498 {
2499 	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2500 		return true;
2501 
2502 	sk_enter_memory_pressure(sk);
2503 	sk_stream_moderate_sndbuf(sk);
2504 	return false;
2505 }
2506 EXPORT_SYMBOL(sk_page_frag_refill);
2507 
2508 static void __lock_sock(struct sock *sk)
2509 	__releases(&sk->sk_lock.slock)
2510 	__acquires(&sk->sk_lock.slock)
2511 {
2512 	DEFINE_WAIT(wait);
2513 
2514 	for (;;) {
2515 		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2516 					TASK_UNINTERRUPTIBLE);
2517 		spin_unlock_bh(&sk->sk_lock.slock);
2518 		schedule();
2519 		spin_lock_bh(&sk->sk_lock.slock);
2520 		if (!sock_owned_by_user(sk))
2521 			break;
2522 	}
2523 	finish_wait(&sk->sk_lock.wq, &wait);
2524 }
2525 
2526 void __release_sock(struct sock *sk)
2527 	__releases(&sk->sk_lock.slock)
2528 	__acquires(&sk->sk_lock.slock)
2529 {
2530 	struct sk_buff *skb, *next;
2531 
2532 	while ((skb = sk->sk_backlog.head) != NULL) {
2533 		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2534 
2535 		spin_unlock_bh(&sk->sk_lock.slock);
2536 
2537 		do {
2538 			next = skb->next;
2539 			prefetch(next);
2540 			WARN_ON_ONCE(skb_dst_is_noref(skb));
2541 			skb_mark_not_on_list(skb);
2542 			sk_backlog_rcv(sk, skb);
2543 
2544 			cond_resched();
2545 
2546 			skb = next;
2547 		} while (skb != NULL);
2548 
2549 		spin_lock_bh(&sk->sk_lock.slock);
2550 	}
2551 
2552 	/*
2553 	 * Doing the zeroing here guarantees we cannot loop forever
2554 	 * while a wild producer attempts to flood us.
2555 	 */
2556 	sk->sk_backlog.len = 0;
2557 }
2558 
2559 void __sk_flush_backlog(struct sock *sk)
2560 {
2561 	spin_lock_bh(&sk->sk_lock.slock);
2562 	__release_sock(sk);
2563 	spin_unlock_bh(&sk->sk_lock.slock);
2564 }
2565 
2566 /**
2567  * sk_wait_data - wait for data to arrive at sk_receive_queue
2568  * @sk:    sock to wait on
2569  * @timeo: for how long
2570  * @skb:   last skb seen on sk_receive_queue
2571  *
2572  * Now socket state including sk->sk_err is changed only under the lock,
2573  * hence we may omit checks after joining the wait queue.
2574  * We check the receive queue before schedule() only as an optimization;
2575  * it is very likely that release_sock() added new data.
2576  */
2577 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2578 {
2579 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2580 	int rc;
2581 
2582 	add_wait_queue(sk_sleep(sk), &wait);
2583 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2584 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2585 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2586 	remove_wait_queue(sk_sleep(sk), &wait);
2587 	return rc;
2588 }
2589 EXPORT_SYMBOL(sk_wait_data);
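
/* Illustrative sketch: a recvmsg() implementation would call
 * sk_wait_data() with the socket lock held, passing the tail it last
 * saw so the wait ends as soon as the receive queue changes, a signal
 * arrives, or *timeo expires. Hypothetical helper:
 */
static inline int example_wait_for_data(struct sock *sk, long *timeo)
{
	const struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);

	return sk_wait_data(sk, timeo, tail);
}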
2590 
2591 /**
2592  *	__sk_mem_raise_allocated - increase memory_allocated
2593  *	@sk: socket
2594  *	@size: memory size to allocate
2595  *	@amt: pages to allocate
2596  *	@kind: allocation type
2597  *
2598  *	Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2599  */
2600 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2601 {
2602 	struct proto *prot = sk->sk_prot;
2603 	long allocated = sk_memory_allocated_add(sk, amt);
2604 	bool charged = true;
2605 
2606 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2607 	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
2608 		goto suppress_allocation;
2609 
2610 	/* Under limit. */
2611 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2612 		sk_leave_memory_pressure(sk);
2613 		return 1;
2614 	}
2615 
2616 	/* Under pressure. */
2617 	if (allocated > sk_prot_mem_limits(sk, 1))
2618 		sk_enter_memory_pressure(sk);
2619 
2620 	/* Over hard limit. */
2621 	if (allocated > sk_prot_mem_limits(sk, 2))
2622 		goto suppress_allocation;
2623 
2624 	/* guarantee minimum buffer size under pressure */
2625 	if (kind == SK_MEM_RECV) {
2626 		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2627 			return 1;
2628 
2629 	} else { /* SK_MEM_SEND */
2630 		int wmem0 = sk_get_wmem0(sk, prot);
2631 
2632 		if (sk->sk_type == SOCK_STREAM) {
2633 			if (sk->sk_wmem_queued < wmem0)
2634 				return 1;
2635 		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2636 			return 1;
2637 		}
2638 	}
2639 
2640 	if (sk_has_memory_pressure(sk)) {
2641 		u64 alloc;
2642 
2643 		if (!sk_under_memory_pressure(sk))
2644 			return 1;
2645 		alloc = sk_sockets_allocated_read_positive(sk);
2646 		if (sk_prot_mem_limits(sk, 2) > alloc *
2647 		    sk_mem_pages(sk->sk_wmem_queued +
2648 				 atomic_read(&sk->sk_rmem_alloc) +
2649 				 sk->sk_forward_alloc))
2650 			return 1;
2651 	}
2652 
2653 suppress_allocation:
2654 
2655 	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2656 		sk_stream_moderate_sndbuf(sk);
2657 
2658 		/* Fail only if socket is _under_ its sndbuf.
2659 		 * In this case we cannot block, so we have to fail.
2660 		 */
2661 		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2662 			return 1;
2663 	}
2664 
2665 	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2666 		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2667 
2668 	sk_memory_allocated_sub(sk, amt);
2669 
2670 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2671 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2672 
2673 	return 0;
2674 }
2675 EXPORT_SYMBOL(__sk_mem_raise_allocated);
2676 
2677 /**
2678  *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2679  *	@sk: socket
2680  *	@size: memory size to allocate
2681  *	@kind: allocation type
2682  *
2683  *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2684  *	rmem allocation. This function assumes that protocols which have
2685  *	memory_pressure use sk_wmem_queued as write buffer accounting.
2686  */
2687 int __sk_mem_schedule(struct sock *sk, int size, int kind)
2688 {
2689 	int ret, amt = sk_mem_pages(size);
2690 
2691 	sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT;
2692 	ret = __sk_mem_raise_allocated(sk, size, amt, kind);
2693 	if (!ret)
2694 		sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT;
2695 	return ret;
2696 }
2697 EXPORT_SYMBOL(__sk_mem_schedule);
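
/* Worked example: SK_MEM_QUANTUM is PAGE_SIZE, so on a 4 KiB-page
 * system a request of size = 6000 bytes gives
 * amt = sk_mem_pages(6000) = 2 quanta; sk_forward_alloc grows by
 * 2 << SK_MEM_QUANTUM_SHIFT = 8192 bytes, and a failed
 * __sk_mem_raise_allocated() takes the same 8192 bytes back, leaving
 * the forward allocation unchanged.
 */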
2698 
2699 /**
2700  *	__sk_mem_reduce_allocated - reclaim memory_allocated
2701  *	@sk: socket
2702  *	@amount: number of quanta
2703  *
2704  *	Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc
2705  */
2706 void __sk_mem_reduce_allocated(struct sock *sk, int amount)
2707 {
2708 	sk_memory_allocated_sub(sk, amount);
2709 
2710 	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2711 		mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
2712 
2713 	if (sk_under_memory_pressure(sk) &&
2714 	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
2715 		sk_leave_memory_pressure(sk);
2716 }
2717 EXPORT_SYMBOL(__sk_mem_reduce_allocated);
2718 
2719 /**
2720  *	__sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated
2721  *	@sk: socket
2722  *	@amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple)
2723  */
2724 void __sk_mem_reclaim(struct sock *sk, int amount)
2725 {
2726 	amount >>= SK_MEM_QUANTUM_SHIFT;
2727 	sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT;
2728 	__sk_mem_reduce_allocated(sk, amount);
2729 }
2730 EXPORT_SYMBOL(__sk_mem_reclaim);
2731 
2732 int sk_set_peek_off(struct sock *sk, int val)
2733 {
2734 	sk->sk_peek_off = val;
2735 	return 0;
2736 }
2737 EXPORT_SYMBOL_GPL(sk_set_peek_off);
2738 
2739 /*
2740  * Set of default routines for initialising struct proto_ops when
2741  * the protocol does not support a particular function. In certain
2742  * cases where it makes no sense for a protocol to have a "do nothing"
2743  * function, some default processing is provided.
2744  */
2745 
2746 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
2747 {
2748 	return -EOPNOTSUPP;
2749 }
2750 EXPORT_SYMBOL(sock_no_bind);
2751 
2752 int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
2753 		    int len, int flags)
2754 {
2755 	return -EOPNOTSUPP;
2756 }
2757 EXPORT_SYMBOL(sock_no_connect);
2758 
2759 int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
2760 {
2761 	return -EOPNOTSUPP;
2762 }
2763 EXPORT_SYMBOL(sock_no_socketpair);
2764 
2765 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags,
2766 		   bool kern)
2767 {
2768 	return -EOPNOTSUPP;
2769 }
2770 EXPORT_SYMBOL(sock_no_accept);
2771 
2772 int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
2773 		    int peer)
2774 {
2775 	return -EOPNOTSUPP;
2776 }
2777 EXPORT_SYMBOL(sock_no_getname);
2778 
2779 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2780 {
2781 	return -EOPNOTSUPP;
2782 }
2783 EXPORT_SYMBOL(sock_no_ioctl);
2784 
2785 int sock_no_listen(struct socket *sock, int backlog)
2786 {
2787 	return -EOPNOTSUPP;
2788 }
2789 EXPORT_SYMBOL(sock_no_listen);
2790 
2791 int sock_no_shutdown(struct socket *sock, int how)
2792 {
2793 	return -EOPNOTSUPP;
2794 }
2795 EXPORT_SYMBOL(sock_no_shutdown);
2796 
2797 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2798 {
2799 	return -EOPNOTSUPP;
2800 }
2801 EXPORT_SYMBOL(sock_no_sendmsg);
2802 
2803 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len)
2804 {
2805 	return -EOPNOTSUPP;
2806 }
2807 EXPORT_SYMBOL(sock_no_sendmsg_locked);
2808 
2809 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len,
2810 		    int flags)
2811 {
2812 	return -EOPNOTSUPP;
2813 }
2814 EXPORT_SYMBOL(sock_no_recvmsg);
2815 
2816 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
2817 {
2818 	/* Mirror missing mmap method error code */
2819 	return -ENODEV;
2820 }
2821 EXPORT_SYMBOL(sock_no_mmap);
2822 
2823 /*
2824  * When a file is received (via SCM_RIGHTS, etc), we must bump the
2825  * various sock-based usage counts.
2826  */
2827 void __receive_sock(struct file *file)
2828 {
2829 	struct socket *sock;
2830 	int error;
2831 
2832 	/*
2833 	 * The resulting value of "error" is ignored here since we only
2834 	 * need to take action when the file is a socket and testing
2835 	 * "sock" for NULL is sufficient.
2836 	 */
2837 	sock = sock_from_file(file, &error);
2838 	if (sock) {
2839 		sock_update_netprioidx(&sock->sk->sk_cgrp_data);
2840 		sock_update_classid(&sock->sk->sk_cgrp_data);
2841 	}
2842 }
2843 
2844 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
2845 {
2846 	ssize_t res;
2847 	struct msghdr msg = {.msg_flags = flags};
2848 	struct kvec iov;
2849 	char *kaddr = kmap(page);
2850 	iov.iov_base = kaddr + offset;
2851 	iov.iov_len = size;
2852 	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
2853 	kunmap(page);
2854 	return res;
2855 }
2856 EXPORT_SYMBOL(sock_no_sendpage);
2857 
2858 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page,
2859 				int offset, size_t size, int flags)
2860 {
2861 	ssize_t res;
2862 	struct msghdr msg = {.msg_flags = flags};
2863 	struct kvec iov;
2864 	char *kaddr = kmap(page);
2865 
2866 	iov.iov_base = kaddr + offset;
2867 	iov.iov_len = size;
2868 	res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size);
2869 	kunmap(page);
2870 	return res;
2871 }
2872 EXPORT_SYMBOL(sock_no_sendpage_locked);
2873 
2874 /*
2875  *	Default Socket Callbacks
2876  */
2877 
2878 static void sock_def_wakeup(struct sock *sk)
2879 {
2880 	struct socket_wq *wq;
2881 
2882 	rcu_read_lock();
2883 	wq = rcu_dereference(sk->sk_wq);
2884 	if (skwq_has_sleeper(wq))
2885 		wake_up_interruptible_all(&wq->wait);
2886 	rcu_read_unlock();
2887 }
2888 
2889 static void sock_def_error_report(struct sock *sk)
2890 {
2891 	struct socket_wq *wq;
2892 
2893 	rcu_read_lock();
2894 	wq = rcu_dereference(sk->sk_wq);
2895 	if (skwq_has_sleeper(wq))
2896 		wake_up_interruptible_poll(&wq->wait, EPOLLERR);
2897 	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
2898 	rcu_read_unlock();
2899 }
2900 
2901 void sock_def_readable(struct sock *sk)
2902 {
2903 	struct socket_wq *wq;
2904 
2905 	rcu_read_lock();
2906 	wq = rcu_dereference(sk->sk_wq);
2907 	if (skwq_has_sleeper(wq))
2908 		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
2909 						EPOLLRDNORM | EPOLLRDBAND);
2910 	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
2911 	rcu_read_unlock();
2912 }
2913 
2914 static void sock_def_write_space(struct sock *sk)
2915 {
2916 	struct socket_wq *wq;
2917 
2918 	rcu_read_lock();
2919 
2920 	/* Do not wake up a writer until he can make "significant"
2921 	 * progress.  --DaveM
2922 	 */
2923 	if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
2924 		wq = rcu_dereference(sk->sk_wq);
2925 		if (skwq_has_sleeper(wq))
2926 			wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2927 						EPOLLWRNORM | EPOLLWRBAND);
2928 
2929 		/* Should agree with poll, otherwise some programs break */
2930 		if (sock_writeable(sk))
2931 			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
2932 	}
2933 
2934 	rcu_read_unlock();
2935 }
2936 
2937 static void sock_def_destruct(struct sock *sk)
2938 {
2939 }
2940 
2941 void sk_send_sigurg(struct sock *sk)
2942 {
2943 	if (sk->sk_socket && sk->sk_socket->file)
2944 		if (send_sigurg(&sk->sk_socket->file->f_owner))
2945 			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
2946 }
2947 EXPORT_SYMBOL(sk_send_sigurg);
2948 
2949 void sk_reset_timer(struct sock *sk, struct timer_list* timer,
2950 		    unsigned long expires)
2951 {
2952 	if (!mod_timer(timer, expires))
2953 		sock_hold(sk);
2954 }
2955 EXPORT_SYMBOL(sk_reset_timer);
2956 
2957 void sk_stop_timer(struct sock *sk, struct timer_list* timer)
2958 {
2959 	if (del_timer(timer))
2960 		__sock_put(sk);
2961 }
2962 EXPORT_SYMBOL(sk_stop_timer);
2963 
2964 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
2965 {
2966 	if (del_timer_sync(timer))
2967 		__sock_put(sk);
2968 }
2969 EXPORT_SYMBOL(sk_stop_timer_sync);
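
/* Illustrative sketch: these helpers keep one sock reference per
 * pending timer. mod_timer() returning 0 means the timer was not
 * already pending, so sk_reset_timer() takes a reference; del_timer()
 * returning nonzero means a pending timer was cancelled, so the
 * sk_stop_timer() variants drop it. A hypothetical arm/disarm pair,
 * for exposition only:
 */
static inline void example_arm_sk_timer(struct sock *sk, unsigned long delay)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
}

static inline void example_disarm_sk_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}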
2970 
2971 void sock_init_data(struct socket *sock, struct sock *sk)
2972 {
2973 	sk_init_common(sk);
2974 	sk->sk_send_head	=	NULL;
2975 
2976 	timer_setup(&sk->sk_timer, NULL, 0);
2977 
2978 	sk->sk_allocation	=	GFP_KERNEL;
2979 	sk->sk_rcvbuf		=	sysctl_rmem_default;
2980 	sk->sk_sndbuf		=	sysctl_wmem_default;
2981 	sk->sk_state		=	TCP_CLOSE;
2982 	sk_set_socket(sk, sock);
2983 
2984 	sock_set_flag(sk, SOCK_ZAPPED);
2985 
2986 	if (sock) {
2987 		sk->sk_type	=	sock->type;
2988 		RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
2989 		sock->sk	=	sk;
2990 		sk->sk_uid	=	SOCK_INODE(sock)->i_uid;
2991 	} else {
2992 		RCU_INIT_POINTER(sk->sk_wq, NULL);
2993 		sk->sk_uid	=	make_kuid(sock_net(sk)->user_ns, 0);
2994 	}
2995 
2996 	rwlock_init(&sk->sk_callback_lock);
2997 	if (sk->sk_kern_sock)
2998 		lockdep_set_class_and_name(
2999 			&sk->sk_callback_lock,
3000 			af_kern_callback_keys + sk->sk_family,
3001 			af_family_kern_clock_key_strings[sk->sk_family]);
3002 	else
3003 		lockdep_set_class_and_name(
3004 			&sk->sk_callback_lock,
3005 			af_callback_keys + sk->sk_family,
3006 			af_family_clock_key_strings[sk->sk_family]);
3007 
3008 	sk->sk_state_change	=	sock_def_wakeup;
3009 	sk->sk_data_ready	=	sock_def_readable;
3010 	sk->sk_write_space	=	sock_def_write_space;
3011 	sk->sk_error_report	=	sock_def_error_report;
3012 	sk->sk_destruct		=	sock_def_destruct;
3013 
3014 	sk->sk_frag.page	=	NULL;
3015 	sk->sk_frag.offset	=	0;
3016 	sk->sk_peek_off		=	-1;
3017 
3018 	sk->sk_peer_pid 	=	NULL;
3019 	sk->sk_peer_cred	=	NULL;
3020 	spin_lock_init(&sk->sk_peer_lock);
3021 
3022 	sk->sk_write_pending	=	0;
3023 	sk->sk_rcvlowat		=	1;
3024 	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
3025 	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;
3026 
3027 	sk->sk_stamp = SK_DEFAULT_STAMP;
3028 #if BITS_PER_LONG==32
3029 	seqlock_init(&sk->sk_stamp_seq);
3030 #endif
3031 	atomic_set(&sk->sk_zckey, 0);
3032 
3033 #ifdef CONFIG_NET_RX_BUSY_POLL
3034 	sk->sk_napi_id		=	0;
3035 	sk->sk_ll_usec		=	sysctl_net_busy_read;
3036 #endif
3037 
3038 	sk->sk_max_pacing_rate = ~0UL;
3039 	sk->sk_pacing_rate = ~0UL;
3040 	WRITE_ONCE(sk->sk_pacing_shift, 10);
3041 	sk->sk_incoming_cpu = -1;
3042 
3043 	sk_rx_queue_clear(sk);
3044 	/*
3045 	 * Before updating sk_refcnt, we must commit prior changes to memory
3046 	 * (Documentation/RCU/rculist_nulls.rst for details)
3047 	 */
3048 	smp_wmb();
3049 	refcount_set(&sk->sk_refcnt, 1);
3050 	atomic_set(&sk->sk_drops, 0);
3051 }
3052 EXPORT_SYMBOL(sock_init_data);
3053 
3054 void lock_sock_nested(struct sock *sk, int subclass)
3055 {
3056 	might_sleep();
3057 	spin_lock_bh(&sk->sk_lock.slock);
3058 	if (sk->sk_lock.owned)
3059 		__lock_sock(sk);
3060 	sk->sk_lock.owned = 1;
3061 	spin_unlock(&sk->sk_lock.slock);
3062 	/*
3063 	 * The sk_lock has mutex_lock() semantics here:
3064 	 */
3065 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3066 	local_bh_enable();
3067 }
3068 EXPORT_SYMBOL(lock_sock_nested);
3069 
3070 void release_sock(struct sock *sk)
3071 {
3072 	spin_lock_bh(&sk->sk_lock.slock);
3073 	if (sk->sk_backlog.tail)
3074 		__release_sock(sk);
3075 
3076 	/* Warning : release_cb() might need to release sk ownership,
3077 	 * i.e. call sock_release_ownership(sk) before us.
3078 	 */
3079 	if (sk->sk_prot->release_cb)
3080 		sk->sk_prot->release_cb(sk);
3081 
3082 	sock_release_ownership(sk);
3083 	if (waitqueue_active(&sk->sk_lock.wq))
3084 		wake_up(&sk->sk_lock.wq);
3085 	spin_unlock_bh(&sk->sk_lock.slock);
3086 }
3087 EXPORT_SYMBOL(release_sock);
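
/* Illustrative sketch: the canonical process-context pattern. While
 * the socket is owned, softirq input is queued to the backlog, and
 * release_sock() above replays it via __release_sock() before waking
 * the next locker. Hypothetical helper, for exposition only:
 */
static inline void example_process_context_op(struct sock *sk)
{
	lock_sock(sk);
	/* ... update socket state; incoming skbs go to the backlog ... */
	release_sock(sk);
}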
3088 
3089 /**
3090  * lock_sock_fast - fast version of lock_sock
3091  * @sk: socket
3092  *
3093  * This version should be used for very small sections, where the process
3094  * won't block. Returns false if the fast path is taken:
3095  *
3096  *   sk_lock.slock locked, owned = 0, BH disabled
3097  *
3098  * Returns true if the slow path is taken:
3099  *
3100  *   sk_lock.slock unlocked, owned = 1, BH enabled
3101  */
3102 bool lock_sock_fast(struct sock *sk)
3103 {
3104 	might_sleep();
3105 	spin_lock_bh(&sk->sk_lock.slock);
3106 
3107 	if (!sk->sk_lock.owned)
3108 		/*
3109 		 * Note : We must disable BH
3110 		 */
3111 		return false;
3112 
3113 	__lock_sock(sk);
3114 	sk->sk_lock.owned = 1;
3115 	spin_unlock(&sk->sk_lock.slock);
3116 	/*
3117 	 * The sk_lock has mutex_lock() semantics here:
3118 	 */
3119 	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
3120 	local_bh_enable();
3121 	return true;
3122 }
3123 EXPORT_SYMBOL(lock_sock_fast);
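
/* Illustrative sketch: lock_sock_fast() must be paired with
 * unlock_sock_fast(), feeding back the returned "slow" flag so the
 * matching unlock path (release_sock() vs. spin_unlock_bh()) is taken.
 * Hypothetical caller, for exposition only:
 */
static inline void example_fast_locked_op(struct sock *sk)
{
	bool slow = lock_sock_fast(sk);

	/* ... very short section; must not sleep on the fast path ... */
	unlock_sock_fast(sk, slow);
}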
3124 
3125 int sock_gettstamp(struct socket *sock, void __user *userstamp,
3126 		   bool timeval, bool time32)
3127 {
3128 	struct sock *sk = sock->sk;
3129 	struct timespec64 ts;
3130 
3131 	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3132 	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3133 	if (ts.tv_sec == -1)
3134 		return -ENOENT;
3135 	if (ts.tv_sec == 0) {
3136 		ktime_t kt = ktime_get_real();
3137 		sock_write_timestamp(sk, kt);
3138 		ts = ktime_to_timespec64(kt);
3139 	}
3140 
3141 	if (timeval)
3142 		ts.tv_nsec /= 1000;
3143 
3144 #ifdef CONFIG_COMPAT_32BIT_TIME
3145 	if (time32)
3146 		return put_old_timespec32(&ts, userstamp);
3147 #endif
3148 #ifdef CONFIG_SPARC64
3149 	/* beware of padding in sparc64 timeval */
3150 	if (timeval && !in_compat_syscall()) {
3151 		struct __kernel_old_timeval __user tv = {
3152 			.tv_sec = ts.tv_sec,
3153 			.tv_usec = ts.tv_nsec,
3154 		};
3155 		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3156 			return -EFAULT;
3157 		return 0;
3158 	}
3159 #endif
3160 	return put_timespec64(&ts, userstamp);
3161 }
3162 EXPORT_SYMBOL(sock_gettstamp);
3163 
3164 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3165 {
3166 	if (!sock_flag(sk, flag)) {
3167 		unsigned long previous_flags = sk->sk_flags;
3168 
3169 		sock_set_flag(sk, flag);
3170 		/*
3171 		 * we just set one of the two flags which require net
3172 		 * time stamping, but time stamping might have been on
3173 		 * already because of the other one
3174 		 */
3175 		if (sock_needs_netstamp(sk) &&
3176 		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3177 			net_enable_timestamp();
3178 	}
3179 }
3180 
3181 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3182 		       int level, int type)
3183 {
3184 	struct sock_exterr_skb *serr;
3185 	struct sk_buff *skb;
3186 	int copied, err;
3187 
3188 	err = -EAGAIN;
3189 	skb = sock_dequeue_err_skb(sk);
3190 	if (skb == NULL)
3191 		goto out;
3192 
3193 	copied = skb->len;
3194 	if (copied > len) {
3195 		msg->msg_flags |= MSG_TRUNC;
3196 		copied = len;
3197 	}
3198 	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3199 	if (err)
3200 		goto out_free_skb;
3201 
3202 	sock_recv_timestamp(msg, sk, skb);
3203 
3204 	serr = SKB_EXT_ERR(skb);
3205 	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3206 
3207 	msg->msg_flags |= MSG_ERRQUEUE;
3208 	err = copied;
3209 
3210 out_free_skb:
3211 	kfree_skb(skb);
3212 out:
3213 	return err;
3214 }
3215 EXPORT_SYMBOL(sock_recv_errqueue);
3216 
3217 /*
3218  *	Get a socket option on a socket.
3219  *
3220  *	FIX: POSIX 1003.1g is very ambiguous here. It states that
3221  *	asynchronous errors should be reported by getsockopt. We assume
3222  *	this means if you specify SO_ERROR (otherwise what's the point of it).
3223  */
3224 int sock_common_getsockopt(struct socket *sock, int level, int optname,
3225 			   char __user *optval, int __user *optlen)
3226 {
3227 	struct sock *sk = sock->sk;
3228 
3229 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3230 	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
3231 }
3232 EXPORT_SYMBOL(sock_common_getsockopt);
3233 
3234 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3235 			int flags)
3236 {
3237 	struct sock *sk = sock->sk;
3238 	int addr_len = 0;
3239 	int err;
3240 
3241 	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
3242 				   flags & ~MSG_DONTWAIT, &addr_len);
3243 	if (err >= 0)
3244 		msg->msg_namelen = addr_len;
3245 	return err;
3246 }
3247 EXPORT_SYMBOL(sock_common_recvmsg);
3248 
3249 /*
3250  *	Set socket options on an inet socket.
3251  */
3252 int sock_common_setsockopt(struct socket *sock, int level, int optname,
3253 			   sockptr_t optval, unsigned int optlen)
3254 {
3255 	struct sock *sk = sock->sk;
3256 
3257 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3258 	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
3259 }
3260 EXPORT_SYMBOL(sock_common_setsockopt);
3261 
3262 void sk_common_release(struct sock *sk)
3263 {
3264 	if (sk->sk_prot->destroy)
3265 		sk->sk_prot->destroy(sk);
3266 
3267 	/*
3268 	 * Observation: when sk_common_release is called, processes have
3269 	 * no access to the socket, but the network stack still does.
3270 	 * Step one, detach it from networking:
3271 	 *
3272 	 * A. Remove from hash tables.
3273 	 */
3274 
3275 	sk->sk_prot->unhash(sk);
3276 
3277 	/*
3278 	 * At this point the socket cannot receive new packets, but it is
3279 	 * possible that some packets are in flight, because some CPU runs the
3280 	 * receiver and did a hash table lookup before we unhashed the socket.
3281 	 * They will reach the receive queue and will be purged by the socket
3282 	 * destructor.
3283 	 *
3284 	 * Also, we still have packets pending on the receive queue and
3285 	 * probably our own packets waiting in device queues. sock_destroy
3286 	 * will drain the receive queue, but transmitted packets will delay
3287 	 * socket destruction until the last reference is released.
3287 	 */
3288 
3289 	sock_orphan(sk);
3290 
3291 	xfrm_sk_free_policy(sk);
3292 
3293 	sk_refcnt_debug_release(sk);
3294 
3295 	sock_put(sk);
3296 }
3297 EXPORT_SYMBOL(sk_common_release);
3298 
3299 void sk_get_meminfo(const struct sock *sk, u32 *mem)
3300 {
3301 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3302 
3303 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3304 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3305 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3306 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3307 	mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
3308 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3309 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3310 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3311 	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3312 }
3313 
3314 #ifdef CONFIG_PROC_FS
3315 #define PROTO_INUSE_NR	64	/* should be enough for the first time */
3316 struct prot_inuse {
3317 	int val[PROTO_INUSE_NR];
3318 };
3319 
3320 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3321 
3322 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
3323 {
3324 	__this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
3325 }
3326 EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
3327 
3328 int sock_prot_inuse_get(struct net *net, struct proto *prot)
3329 {
3330 	int cpu, idx = prot->inuse_idx;
3331 	int res = 0;
3332 
3333 	for_each_possible_cpu(cpu)
3334 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3335 
3336 	return res >= 0 ? res : 0;
3337 }
3338 EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
3339 
3340 static void sock_inuse_add(struct net *net, int val)
3341 {
3342 	this_cpu_add(*net->core.sock_inuse, val);
3343 }
3344 
3345 int sock_inuse_get(struct net *net)
3346 {
3347 	int cpu, res = 0;
3348 
3349 	for_each_possible_cpu(cpu)
3350 		res += *per_cpu_ptr(net->core.sock_inuse, cpu);
3351 
3352 	return res;
3353 }
3354 
3355 EXPORT_SYMBOL_GPL(sock_inuse_get);
3356 
3357 static int __net_init sock_inuse_init_net(struct net *net)
3358 {
3359 	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
3360 	if (net->core.prot_inuse == NULL)
3361 		return -ENOMEM;
3362 
3363 	net->core.sock_inuse = alloc_percpu(int);
3364 	if (net->core.sock_inuse == NULL)
3365 		goto out;
3366 
3367 	return 0;
3368 
3369 out:
3370 	free_percpu(net->core.prot_inuse);
3371 	return -ENOMEM;
3372 }
3373 
3374 static void __net_exit sock_inuse_exit_net(struct net *net)
3375 {
3376 	free_percpu(net->core.prot_inuse);
3377 	free_percpu(net->core.sock_inuse);
3378 }
3379 
3380 static struct pernet_operations net_inuse_ops = {
3381 	.init = sock_inuse_init_net,
3382 	.exit = sock_inuse_exit_net,
3383 };
3384 
3385 static __init int net_inuse_init(void)
3386 {
3387 	if (register_pernet_subsys(&net_inuse_ops))
3388 		panic("Cannot initialize net inuse counters");
3389 
3390 	return 0;
3391 }
3392 
3393 core_initcall(net_inuse_init);
3394 
3395 static int assign_proto_idx(struct proto *prot)
3396 {
3397 	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
3398 
3399 	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
3400 		pr_err("PROTO_INUSE_NR exhausted\n");
3401 		return -ENOSPC;
3402 	}
3403 
3404 	set_bit(prot->inuse_idx, proto_inuse_idx);
3405 	return 0;
3406 }
3407 
3408 static void release_proto_idx(struct proto *prot)
3409 {
3410 	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
3411 		clear_bit(prot->inuse_idx, proto_inuse_idx);
3412 }
3413 #else
3414 static inline int assign_proto_idx(struct proto *prot)
3415 {
3416 	return 0;
3417 }
3418 
3419 static inline void release_proto_idx(struct proto *prot)
3420 {
3421 }
3422 
3423 static void sock_inuse_add(struct net *net, int val)
3424 {
3425 }
3426 #endif
3427 
3428 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
3429 {
3430 	if (!twsk_prot)
3431 		return;
3432 	kfree(twsk_prot->twsk_slab_name);
3433 	twsk_prot->twsk_slab_name = NULL;
3434 	kmem_cache_destroy(twsk_prot->twsk_slab);
3435 	twsk_prot->twsk_slab = NULL;
3436 }
3437 
3438 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
3439 {
3440 	if (!rsk_prot)
3441 		return;
3442 	kfree(rsk_prot->slab_name);
3443 	rsk_prot->slab_name = NULL;
3444 	kmem_cache_destroy(rsk_prot->slab);
3445 	rsk_prot->slab = NULL;
3446 }
3447 
3448 static int req_prot_init(const struct proto *prot)
3449 {
3450 	struct request_sock_ops *rsk_prot = prot->rsk_prot;
3451 
3452 	if (!rsk_prot)
3453 		return 0;
3454 
3455 	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
3456 					prot->name);
3457 	if (!rsk_prot->slab_name)
3458 		return -ENOMEM;
3459 
3460 	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
3461 					   rsk_prot->obj_size, 0,
3462 					   SLAB_ACCOUNT | prot->slab_flags,
3463 					   NULL);
3464 
3465 	if (!rsk_prot->slab) {
3466 		pr_crit("%s: Can't create request sock SLAB cache!\n",
3467 			prot->name);
3468 		return -ENOMEM;
3469 	}
3470 	return 0;
3471 }
3472 
3473 int proto_register(struct proto *prot, int alloc_slab)
3474 {
3475 	int ret = -ENOBUFS;
3476 
3477 	if (alloc_slab) {
3478 		prot->slab = kmem_cache_create_usercopy(prot->name,
3479 					prot->obj_size, 0,
3480 					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
3481 					prot->slab_flags,
3482 					prot->useroffset, prot->usersize,
3483 					NULL);
3484 
3485 		if (prot->slab == NULL) {
3486 			pr_crit("%s: Can't create sock SLAB cache!\n",
3487 				prot->name);
3488 			goto out;
3489 		}
3490 
3491 		if (req_prot_init(prot))
3492 			goto out_free_request_sock_slab;
3493 
3494 		if (prot->twsk_prot != NULL) {
3495 			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
3496 
3497 			if (prot->twsk_prot->twsk_slab_name == NULL)
3498 				goto out_free_request_sock_slab;
3499 
3500 			prot->twsk_prot->twsk_slab =
3501 				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
3502 						  prot->twsk_prot->twsk_obj_size,
3503 						  0,
3504 						  SLAB_ACCOUNT |
3505 						  prot->slab_flags,
3506 						  NULL);
3507 			if (prot->twsk_prot->twsk_slab == NULL)
3508 				goto out_free_timewait_sock_slab;
3509 		}
3510 	}
3511 
3512 	mutex_lock(&proto_list_mutex);
3513 	ret = assign_proto_idx(prot);
3514 	if (ret) {
3515 		mutex_unlock(&proto_list_mutex);
3516 		goto out_free_timewait_sock_slab;
3517 	}
3518 	list_add(&prot->node, &proto_list);
3519 	mutex_unlock(&proto_list_mutex);
3520 	return ret;
3521 
3522 out_free_timewait_sock_slab:
3523 	if (alloc_slab && prot->twsk_prot)
3524 		tw_prot_cleanup(prot->twsk_prot);
3525 out_free_request_sock_slab:
3526 	if (alloc_slab) {
3527 		req_prot_cleanup(prot->rsk_prot);
3528 
3529 		kmem_cache_destroy(prot->slab);
3530 		prot->slab = NULL;
3531 	}
3532 out:
3533 	return ret;
3534 }
3535 EXPORT_SYMBOL(proto_register);
3536 
3537 void proto_unregister(struct proto *prot)
3538 {
3539 	mutex_lock(&proto_list_mutex);
3540 	release_proto_idx(prot);
3541 	list_del(&prot->node);
3542 	mutex_unlock(&proto_list_mutex);
3543 
3544 	kmem_cache_destroy(prot->slab);
3545 	prot->slab = NULL;
3546 
3547 	req_prot_cleanup(prot->rsk_prot);
3548 	tw_prot_cleanup(prot->twsk_prot);
3549 }
3550 EXPORT_SYMBOL(proto_unregister);
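
/* Illustrative sketch: minimal use of proto_register()/proto_unregister()
 * by a protocol module. The "foo" protocol below is hypothetical; real
 * protocols fill in many more fields and ops.
 *
 *	static struct proto foo_proto = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct sock),
 *	};
 *
 *	err = proto_register(&foo_proto, 1);	(1 => allocate a slab)
 *	...
 *	proto_unregister(&foo_proto);
 */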
3551 
3552 int sock_load_diag_module(int family, int protocol)
3553 {
3554 	if (!protocol) {
3555 		if (!sock_is_registered(family))
3556 			return -ENOENT;
3557 
3558 		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
3559 				      NETLINK_SOCK_DIAG, family);
3560 	}
3561 
3562 #ifdef CONFIG_INET
3563 	if (family == AF_INET &&
3564 	    protocol != IPPROTO_RAW &&
3565 	    protocol < MAX_INET_PROTOS &&
3566 	    !rcu_access_pointer(inet_protos[protocol]))
3567 		return -ENOENT;
3568 #endif
3569 
3570 	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
3571 			      NETLINK_SOCK_DIAG, family, protocol);
3572 }
3573 EXPORT_SYMBOL(sock_load_diag_module);
3574 
3575 #ifdef CONFIG_PROC_FS
3576 static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
3577 	__acquires(proto_list_mutex)
3578 {
3579 	mutex_lock(&proto_list_mutex);
3580 	return seq_list_start_head(&proto_list, *pos);
3581 }
3582 
3583 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3584 {
3585 	return seq_list_next(v, &proto_list, pos);
3586 }
3587 
3588 static void proto_seq_stop(struct seq_file *seq, void *v)
3589 	__releases(proto_list_mutex)
3590 {
3591 	mutex_unlock(&proto_list_mutex);
3592 }
3593 
3594 static char proto_method_implemented(const void *method)
3595 {
3596 	return method == NULL ? 'n' : 'y';
3597 }
3598 static long sock_prot_memory_allocated(struct proto *proto)
3599 {
3600 	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
3601 }
3602 
3603 static const char *sock_prot_memory_pressure(struct proto *proto)
3604 {
3605 	return proto->memory_pressure != NULL ?
3606 	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
3607 }
3608 
3609 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
3610 {
3611 
3612 	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
3613 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
3614 		   proto->name,
3615 		   proto->obj_size,
3616 		   sock_prot_inuse_get(seq_file_net(seq), proto),
3617 		   sock_prot_memory_allocated(proto),
3618 		   sock_prot_memory_pressure(proto),
3619 		   proto->max_header,
3620 		   proto->slab == NULL ? "no" : "yes",
3621 		   module_name(proto->owner),
3622 		   proto_method_implemented(proto->close),
3623 		   proto_method_implemented(proto->connect),
3624 		   proto_method_implemented(proto->disconnect),
3625 		   proto_method_implemented(proto->accept),
3626 		   proto_method_implemented(proto->ioctl),
3627 		   proto_method_implemented(proto->init),
3628 		   proto_method_implemented(proto->destroy),
3629 		   proto_method_implemented(proto->shutdown),
3630 		   proto_method_implemented(proto->setsockopt),
3631 		   proto_method_implemented(proto->getsockopt),
3632 		   proto_method_implemented(proto->sendmsg),
3633 		   proto_method_implemented(proto->recvmsg),
3634 		   proto_method_implemented(proto->sendpage),
3635 		   proto_method_implemented(proto->bind),
3636 		   proto_method_implemented(proto->backlog_rcv),
3637 		   proto_method_implemented(proto->hash),
3638 		   proto_method_implemented(proto->unhash),
3639 		   proto_method_implemented(proto->get_port),
3640 		   proto_method_implemented(proto->enter_memory_pressure));
3641 }
3642 
3643 static int proto_seq_show(struct seq_file *seq, void *v)
3644 {
3645 	if (v == &proto_list)
3646 		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
3647 			   "protocol",
3648 			   "size",
3649 			   "sockets",
3650 			   "memory",
3651 			   "press",
3652 			   "maxhdr",
3653 			   "slab",
3654 			   "module",
3655 			   "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
3656 	else
3657 		proto_seq_printf(seq, list_entry(v, struct proto, node));
3658 	return 0;
3659 }
3660 
3661 static const struct seq_operations proto_seq_ops = {
3662 	.start  = proto_seq_start,
3663 	.next   = proto_seq_next,
3664 	.stop   = proto_seq_stop,
3665 	.show   = proto_seq_show,
3666 };
3667 
3668 static __net_init int proto_init_net(struct net *net)
3669 {
3670 	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
3671 			sizeof(struct seq_net_private)))
3672 		return -ENOMEM;
3673 
3674 	return 0;
3675 }
3676 
3677 static __net_exit void proto_exit_net(struct net *net)
3678 {
3679 	remove_proc_entry("protocols", net->proc_net);
3680 }
3681 
3682 
3683 static __net_initdata struct pernet_operations proto_net_ops = {
3684 	.init = proto_init_net,
3685 	.exit = proto_exit_net,
3686 };
3687 
3688 static int __init proto_init(void)
3689 {
3690 	return register_pernet_subsys(&proto_net_ops);
3691 }
3692 
3693 subsys_initcall(proto_init);
3694 
3695 #endif /* PROC_FS */
3696 
3697 #ifdef CONFIG_NET_RX_BUSY_POLL
3698 bool sk_busy_loop_end(void *p, unsigned long start_time)
3699 {
3700 	struct sock *sk = p;
3701 
3702 	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
3703 	       sk_busy_loop_timeout(sk, start_time);
3704 }
3705 EXPORT_SYMBOL(sk_busy_loop_end);
3706 #endif /* CONFIG_NET_RX_BUSY_POLL */
3707 
3708 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
3709 {
3710 	if (!sk->sk_prot->bind_add)
3711 		return -EOPNOTSUPP;
3712 	return sk->sk_prot->bind_add(sk, addr, addr_len);
3713 }
3714 EXPORT_SYMBOL(sock_bind_add);
3715