1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Support for INET connection oriented protocols.
8  *
9  * Authors:	See the TCP sources
10  */
11 
12 #include <linux/module.h>
13 #include <linux/jhash.h>
14 
15 #include <net/inet_connection_sock.h>
16 #include <net/inet_hashtables.h>
17 #include <net/inet_timewait_sock.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/tcp_states.h>
21 #include <net/xfrm.h>
22 #include <net/tcp.h>
23 #include <net/sock_reuseport.h>
24 #include <net/addrconf.h>
25 
26 #if IS_ENABLED(CONFIG_IPV6)
27 /* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals any IPv6 address,
28  *				and also any IPv4 address if the socket
29  *				is not IPv6-only
30  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
31  *				IPV6_ADDR_ANY only equals IPV6_ADDR_ANY,
32  *				and 0.0.0.0 only equals 0.0.0.0
33  */
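/* For example, with match_sk2_wildcard == true an sk2 bound to the
 * unspecified address (::) matches any sk1 address, except a v4-mapped
 * sk1 address when sk2 is IPv6-only.  With match_sk2_wildcard == false,
 * :: on one side only matches :: on the other.
 */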
34 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
35 				 const struct in6_addr *sk2_rcv_saddr6,
36 				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
37 				 bool sk1_ipv6only, bool sk2_ipv6only,
38 				 bool match_sk1_wildcard,
39 				 bool match_sk2_wildcard)
40 {
41 	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
42 	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
43 
44 	/* if both are mapped, treat as IPv4 */
45 	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
46 		if (!sk2_ipv6only) {
47 			if (sk1_rcv_saddr == sk2_rcv_saddr)
48 				return true;
49 			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
50 				(match_sk2_wildcard && !sk2_rcv_saddr);
51 		}
52 		return false;
53 	}
54 
55 	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
56 		return true;
57 
58 	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
59 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
60 		return true;
61 
62 	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
63 	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
64 		return true;
65 
66 	if (sk2_rcv_saddr6 &&
67 	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
68 		return true;
69 
70 	return false;
71 }
72 #endif
73 
74 /* match_sk*_wildcard == true:  0.0.0.0 equals any IPv4 address
75  * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
76  *				0.0.0.0 only equals 0.0.0.0
77  */
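/* For example, sk1 bound to 192.0.2.1 and sk2 bound to 0.0.0.0 compare
 * equal only when match_sk2_wildcard == true (and sk2 is not IPv6-only).
 */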
78 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
79 				 bool sk2_ipv6only, bool match_sk1_wildcard,
80 				 bool match_sk2_wildcard)
81 {
82 	if (!sk2_ipv6only) {
83 		if (sk1_rcv_saddr == sk2_rcv_saddr)
84 			return true;
85 		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
86 			(match_sk2_wildcard && !sk2_rcv_saddr);
87 	}
88 	return false;
89 }
90 
91 bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
92 			  bool match_wildcard)
93 {
94 #if IS_ENABLED(CONFIG_IPV6)
95 	if (sk->sk_family == AF_INET6)
96 		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
97 					    inet6_rcv_saddr(sk2),
98 					    sk->sk_rcv_saddr,
99 					    sk2->sk_rcv_saddr,
100 					    ipv6_only_sock(sk),
101 					    ipv6_only_sock(sk2),
102 					    match_wildcard,
103 					    match_wildcard);
104 #endif
105 	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
106 				    ipv6_only_sock(sk2), match_wildcard,
107 				    match_wildcard);
108 }
109 EXPORT_SYMBOL(inet_rcv_saddr_equal);
110 
111 bool inet_rcv_saddr_any(const struct sock *sk)
112 {
113 #if IS_ENABLED(CONFIG_IPV6)
114 	if (sk->sk_family == AF_INET6)
115 		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
116 #endif
117 	return !sk->sk_rcv_saddr;
118 }
119 
120 void inet_get_local_port_range(const struct net *net, int *low, int *high)
121 {
122 	unsigned int seq;
123 
124 	do {
125 		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
126 
127 		*low = net->ipv4.ip_local_ports.range[0];
128 		*high = net->ipv4.ip_local_ports.range[1];
129 	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
130 }
131 EXPORT_SYMBOL(inet_get_local_port_range);
132 
133 void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
134 {
135 	const struct inet_sock *inet = inet_sk(sk);
136 	const struct net *net = sock_net(sk);
137 	int lo, hi, sk_lo, sk_hi;
138 
139 	inet_get_local_port_range(net, &lo, &hi);
140 
141 	sk_lo = inet->local_port_range.lo;
142 	sk_hi = inet->local_port_range.hi;
143 
144 	if (unlikely(lo <= sk_lo && sk_lo <= hi))
145 		lo = sk_lo;
146 	if (unlikely(lo <= sk_hi && sk_hi <= hi))
147 		hi = sk_hi;
148 
149 	*low = lo;
150 	*high = hi;
151 }
152 EXPORT_SYMBOL(inet_sk_get_local_port_range);
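/* The per-socket range (inet->local_port_range, set via the
 * IP_LOCAL_PORT_RANGE socket option) can only narrow the result: each
 * per-socket bound replaces the corresponding netns-wide bound only
 * when it lies within the range selected so far.
 */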
153 
154 static bool inet_use_bhash2_on_bind(const struct sock *sk)
155 {
156 #if IS_ENABLED(CONFIG_IPV6)
157 	if (sk->sk_family == AF_INET6) {
158 		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
159 
160 		return addr_type != IPV6_ADDR_ANY &&
161 			addr_type != IPV6_ADDR_MAPPED;
162 	}
163 #endif
164 	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
165 }
166 
167 static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
168 			       kuid_t sk_uid, bool relax,
169 			       bool reuseport_cb_ok, bool reuseport_ok)
170 {
171 	int bound_dev_if2;
172 
173 	if (sk == sk2)
174 		return false;
175 
176 	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);
177 
178 	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
179 	    sk->sk_bound_dev_if == bound_dev_if2) {
180 		if (sk->sk_reuse && sk2->sk_reuse &&
181 		    sk2->sk_state != TCP_LISTEN) {
182 			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
183 				       sk2->sk_reuseport && reuseport_cb_ok &&
184 				       (sk2->sk_state == TCP_TIME_WAIT ||
185 					uid_eq(sk_uid, sock_i_uid(sk2)))))
186 				return true;
187 		} else if (!reuseport_ok || !sk->sk_reuseport ||
188 			   !sk2->sk_reuseport || !reuseport_cb_ok ||
189 			   (sk2->sk_state != TCP_TIME_WAIT &&
190 			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
191 			return true;
192 		}
193 	}
194 	return false;
195 }
196 
197 static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
198 				   kuid_t sk_uid, bool relax,
199 				   bool reuseport_cb_ok, bool reuseport_ok)
200 {
201 	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
202 		return false;
203 
204 	return inet_bind_conflict(sk, sk2, sk_uid, relax,
205 				  reuseport_cb_ok, reuseport_ok);
206 }
207 
208 static bool inet_bhash2_conflict(const struct sock *sk,
209 				 const struct inet_bind2_bucket *tb2,
210 				 kuid_t sk_uid,
211 				 bool relax, bool reuseport_cb_ok,
212 				 bool reuseport_ok)
213 {
214 	struct inet_timewait_sock *tw2;
215 	struct sock *sk2;
216 
217 	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
218 		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
219 					   reuseport_cb_ok, reuseport_ok))
220 			return true;
221 	}
222 
223 	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
224 		sk2 = (struct sock *)tw2;
225 
226 		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
227 					   reuseport_cb_ok, reuseport_ok))
228 			return true;
229 	}
230 
231 	return false;
232 }
233 
234 /* This should be called only when the tb and tb2 hashbuckets' locks are held */
235 static int inet_csk_bind_conflict(const struct sock *sk,
236 				  const struct inet_bind_bucket *tb,
237 				  const struct inet_bind2_bucket *tb2, /* may be null */
238 				  bool relax, bool reuseport_ok)
239 {
240 	bool reuseport_cb_ok;
241 	struct sock_reuseport *reuseport_cb;
242 	kuid_t uid = sock_i_uid((struct sock *)sk);
243 
244 	rcu_read_lock();
245 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
246 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
247 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
248 	rcu_read_unlock();
249 
250 	/*
251 	 * Unlike other sk lookup places we do not check
252 	 * for sk_net here, since _all_ the socks listed
253 	 * in tb->owners and tb2->owners list belong
254 	 * to the same net - the one this bucket belongs to.
255 	 */
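	/* Both bucket types are created per net and matched with net_eq(),
	 * see inet_bind_bucket_match() and inet_bind2_bucket_find().
	 */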
256 
257 	if (!inet_use_bhash2_on_bind(sk)) {
258 		struct sock *sk2;
259 
260 		sk_for_each_bound(sk2, &tb->owners)
261 			if (inet_bind_conflict(sk, sk2, uid, relax,
262 					       reuseport_cb_ok, reuseport_ok) &&
263 			    inet_rcv_saddr_equal(sk, sk2, true))
264 				return true;
265 
266 		return false;
267 	}
268 
269 	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
270 	 * ipv4) should have been checked already. We need to do these two
271 	 * checks separately because their spinlocks have to be acquired/released
272 	 * independently of each other, to prevent possible deadlocks
273 	 */
274 	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
275 					   reuseport_ok);
276 }
277 
278 /* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
279  * INADDR_ANY (if ipv4) socket.
280  *
281  * Caller must hold bhash hashbucket lock with local bh disabled, to protect
282  * against concurrent binds on the port for addr any
283  */
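/* Both callers, inet_csk_find_open_port() and inet_csk_get_port(),
 * already hold the bhash bucket lock taken with spin_lock_bh() when
 * they call this.
 */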
284 static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
285 					  bool relax, bool reuseport_ok)
286 {
287 	kuid_t uid = sock_i_uid((struct sock *)sk);
288 	const struct net *net = sock_net(sk);
289 	struct sock_reuseport *reuseport_cb;
290 	struct inet_bind_hashbucket *head2;
291 	struct inet_bind2_bucket *tb2;
292 	bool reuseport_cb_ok;
293 
294 	rcu_read_lock();
295 	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
296 	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
297 	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
298 	rcu_read_unlock();
299 
300 	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);
301 
302 	spin_lock(&head2->lock);
303 
304 	inet_bind_bucket_for_each(tb2, &head2->chain)
305 		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
306 			break;
307 
308 	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
309 					reuseport_ok)) {
310 		spin_unlock(&head2->lock);
311 		return true;
312 	}
313 
314 	spin_unlock(&head2->lock);
315 	return false;
316 }
317 
318 /*
319  * Find an open port number for the socket.  Returns with the
320  * inet_bind_hashbucket locks held if successful.
321  */
322 static struct inet_bind_hashbucket *
323 inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
324 			struct inet_bind2_bucket **tb2_ret,
325 			struct inet_bind_hashbucket **head2_ret, int *port_ret)
326 {
327 	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
328 	int i, low, high, attempt_half, port, l3mdev;
329 	struct inet_bind_hashbucket *head, *head2;
330 	struct net *net = sock_net(sk);
331 	struct inet_bind2_bucket *tb2;
332 	struct inet_bind_bucket *tb;
333 	u32 remaining, offset;
334 	bool relax = false;
335 
336 	l3mdev = inet_sk_bound_l3mdev(sk);
337 ports_exhausted:
338 	attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
339 other_half_scan:
340 	inet_sk_get_local_port_range(sk, &low, &high);
341 	high++; /* [32768, 60999] -> [32768, 61000[ */
342 	if (high - low < 4)
343 		attempt_half = 0;
344 	if (attempt_half) {
345 		int half = low + (((high - low) >> 2) << 1);
346 
347 		if (attempt_half == 1)
348 			high = half;
349 		else
350 			low = half;
351 	}
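	/* e.g. with the default range turned into [32768, 61000), half is
	 * 46884: attempt_half == 1 scans [32768, 46884) and, if that fails,
	 * attempt_half == 2 scans [46884, 61000).
	 */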
352 	remaining = high - low;
353 	if (likely(remaining > 1))
354 		remaining &= ~1U;
355 
356 	offset = prandom_u32_max(remaining);
357 	/* __inet_hash_connect() favors ports having @low parity
358 	 * We do the opposite to not pollute connect() users.
359 	 */
360 	offset |= 1U;
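	/* e.g. with an even @low such as 32768 this scan only tries odd
	 * ports, leaving the even ones to __inet_hash_connect().
	 */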
361 
362 other_parity_scan:
363 	port = low + offset;
364 	for (i = 0; i < remaining; i += 2, port += 2) {
365 		if (unlikely(port >= high))
366 			port -= remaining;
367 		if (inet_is_local_reserved_port(net, port))
368 			continue;
369 		head = &hinfo->bhash[inet_bhashfn(net, port,
370 						  hinfo->bhash_size)];
371 		spin_lock_bh(&head->lock);
372 		if (inet_use_bhash2_on_bind(sk)) {
373 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
374 				goto next_port;
375 		}
376 
377 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
378 		spin_lock(&head2->lock);
379 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
380 		inet_bind_bucket_for_each(tb, &head->chain)
381 			if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
382 				if (!inet_csk_bind_conflict(sk, tb, tb2,
383 							    relax, false))
384 					goto success;
385 				spin_unlock(&head2->lock);
386 				goto next_port;
387 			}
388 		tb = NULL;
389 		goto success;
390 next_port:
391 		spin_unlock_bh(&head->lock);
392 		cond_resched();
393 	}
394 
395 	offset--;
396 	if (!(offset & 1))
397 		goto other_parity_scan;
398 
399 	if (attempt_half == 1) {
400 		/* OK we now try the upper half of the range */
401 		attempt_half = 2;
402 		goto other_half_scan;
403 	}
404 
405 	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
406 		/* We still have a chance to connect to different destinations */
407 		relax = true;
408 		goto ports_exhausted;
409 	}
410 	return NULL;
411 success:
412 	*port_ret = port;
413 	*tb_ret = tb;
414 	*tb2_ret = tb2;
415 	*head2_ret = head2;
416 	return head;
417 }
418 
419 static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
420 				     struct sock *sk)
421 {
422 	kuid_t uid = sock_i_uid(sk);
423 
424 	if (tb->fastreuseport <= 0)
425 		return 0;
426 	if (!sk->sk_reuseport)
427 		return 0;
428 	if (rcu_access_pointer(sk->sk_reuseport_cb))
429 		return 0;
430 	if (!uid_eq(tb->fastuid, uid))
431 		return 0;
432 	/* We only need to check the rcv_saddr if this tb was once marked
433 	 * without fastreuseport and then was reset, as we can only know that
434 	 * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
435 	 * owners list.
436 	 */
437 	if (tb->fastreuseport == FASTREUSEPORT_ANY)
438 		return 1;
439 #if IS_ENABLED(CONFIG_IPV6)
440 	if (tb->fast_sk_family == AF_INET6)
441 		return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
442 					    inet6_rcv_saddr(sk),
443 					    tb->fast_rcv_saddr,
444 					    sk->sk_rcv_saddr,
445 					    tb->fast_ipv6_only,
446 					    ipv6_only_sock(sk), true, false);
447 #endif
448 	return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
449 				    ipv6_only_sock(sk), true, false);
450 }
451 
452 void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
453 			       struct sock *sk)
454 {
455 	kuid_t uid = sock_i_uid(sk);
456 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
457 
458 	if (hlist_empty(&tb->owners)) {
459 		tb->fastreuse = reuse;
460 		if (sk->sk_reuseport) {
461 			tb->fastreuseport = FASTREUSEPORT_ANY;
462 			tb->fastuid = uid;
463 			tb->fast_rcv_saddr = sk->sk_rcv_saddr;
464 			tb->fast_ipv6_only = ipv6_only_sock(sk);
465 			tb->fast_sk_family = sk->sk_family;
466 #if IS_ENABLED(CONFIG_IPV6)
467 			tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
468 #endif
469 		} else {
470 			tb->fastreuseport = 0;
471 		}
472 	} else {
473 		if (!reuse)
474 			tb->fastreuse = 0;
475 		if (sk->sk_reuseport) {
476 			/* We didn't match or we don't have fastreuseport set on
477 			 * the tb, but we have sk_reuseport set on this socket
478 			 * and we know that there are no bind conflicts with
479 			 * this socket in this tb, so reset our tb's reuseport
480 			 * settings so that any subsequent sockets that match
481 			 * our current socket will be put on the fast path.
482 			 *
483 			 * If we reset we need to set FASTREUSEPORT_STRICT so we
484 			 * do extra checking for all subsequent sk_reuseport
485 			 * socks.
486 			 */
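			/* Example: the bucket holds a non-reuseport socket
			 * (fastreuseport == 0) and a new SO_REUSEPORT socket
			 * has just passed the bind conflict check; the match
			 * below fails, so the bucket switches to
			 * FASTREUSEPORT_STRICT and records this socket's
			 * address and uid for later binds.
			 */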
487 			if (!sk_reuseport_match(tb, sk)) {
488 				tb->fastreuseport = FASTREUSEPORT_STRICT;
489 				tb->fastuid = uid;
490 				tb->fast_rcv_saddr = sk->sk_rcv_saddr;
491 				tb->fast_ipv6_only = ipv6_only_sock(sk);
492 				tb->fast_sk_family = sk->sk_family;
493 #if IS_ENABLED(CONFIG_IPV6)
494 				tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
495 #endif
496 			}
497 		} else {
498 			tb->fastreuseport = 0;
499 		}
500 	}
501 }
502 
503 /* Obtain a reference to a local port for the given sock,
504  * if snum is zero it means select any available local port.
505  * We try to allocate an odd port (and leave even ports for connect())
506  */
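/* For TCP this is wired up as sk->sk_prot->get_port() and is reached, for
 * example, from inet_csk_listen_start() below with snum == inet->inet_num
 * (0 if the socket was never explicitly bound).
 */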
507 int inet_csk_get_port(struct sock *sk, unsigned short snum)
508 {
509 	struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
510 	bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
511 	bool found_port = false, check_bind_conflict = true;
512 	bool bhash_created = false, bhash2_created = false;
513 	int ret = -EADDRINUSE, port = snum, l3mdev;
514 	struct inet_bind_hashbucket *head, *head2;
515 	struct inet_bind2_bucket *tb2 = NULL;
516 	struct inet_bind_bucket *tb = NULL;
517 	bool head2_lock_acquired = false;
518 	struct net *net = sock_net(sk);
519 
520 	l3mdev = inet_sk_bound_l3mdev(sk);
521 
522 	if (!port) {
523 		head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
524 		if (!head)
525 			return ret;
526 
527 		head2_lock_acquired = true;
528 
529 		if (tb && tb2)
530 			goto success;
531 		found_port = true;
532 	} else {
533 		head = &hinfo->bhash[inet_bhashfn(net, port,
534 						  hinfo->bhash_size)];
535 		spin_lock_bh(&head->lock);
536 		inet_bind_bucket_for_each(tb, &head->chain)
537 			if (inet_bind_bucket_match(tb, net, port, l3mdev))
538 				break;
539 	}
540 
541 	if (!tb) {
542 		tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
543 					     head, port, l3mdev);
544 		if (!tb)
545 			goto fail_unlock;
546 		bhash_created = true;
547 	}
548 
549 	if (!found_port) {
550 		if (!hlist_empty(&tb->owners)) {
551 			if (sk->sk_reuse == SK_FORCE_REUSE ||
552 			    (tb->fastreuse > 0 && reuse) ||
553 			    sk_reuseport_match(tb, sk))
554 				check_bind_conflict = false;
555 		}
556 
557 		if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
558 			if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
559 				goto fail_unlock;
560 		}
561 
562 		head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
563 		spin_lock(&head2->lock);
564 		head2_lock_acquired = true;
565 		tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
566 	}
567 
568 	if (!tb2) {
569 		tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
570 					       net, head2, port, l3mdev, sk);
571 		if (!tb2)
572 			goto fail_unlock;
573 		bhash2_created = true;
574 	}
575 
576 	if (!found_port && check_bind_conflict) {
577 		if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
578 			goto fail_unlock;
579 	}
580 
581 success:
582 	inet_csk_update_fastreuse(tb, sk);
583 
584 	if (!inet_csk(sk)->icsk_bind_hash)
585 		inet_bind_hash(sk, tb, tb2, port);
586 	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
587 	WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
588 	ret = 0;
589 
590 fail_unlock:
591 	if (ret) {
592 		if (bhash_created)
593 			inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
594 		if (bhash2_created)
595 			inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
596 						  tb2);
597 	}
598 	if (head2_lock_acquired)
599 		spin_unlock(&head2->lock);
600 	spin_unlock_bh(&head->lock);
601 	return ret;
602 }
603 EXPORT_SYMBOL_GPL(inet_csk_get_port);
604 
605 /*
606  * Wait for an incoming connection, avoid race conditions. This must be called
607  * with the socket locked.
608  */
609 static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
610 {
611 	struct inet_connection_sock *icsk = inet_csk(sk);
612 	DEFINE_WAIT(wait);
613 	int err;
614 
615 	/*
616 	 * True wake-one mechanism for incoming connections: only
617 	 * one process gets woken up, not the 'whole herd'.
618 	 * Since we do not 'race & poll' for established sockets
619 	 * anymore, the common case will execute the loop only once.
620 	 *
621 	 * Subtle issue: "add_wait_queue_exclusive()" will be added
622 	 * after any current non-exclusive waiters, and we know that
623 	 * it will always _stay_ after any new non-exclusive waiters
624 	 * because all non-exclusive waiters are added at the
625 	 * beginning of the wait-queue. As such, it's ok to "drop"
626 	 * our exclusiveness temporarily when we get woken up without
627 	 * having to remove and re-insert us on the wait queue.
628 	 */
629 	for (;;) {
630 		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
631 					  TASK_INTERRUPTIBLE);
632 		release_sock(sk);
633 		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
634 			timeo = schedule_timeout(timeo);
635 		sched_annotate_sleep();
636 		lock_sock(sk);
637 		err = 0;
638 		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
639 			break;
640 		err = -EINVAL;
641 		if (sk->sk_state != TCP_LISTEN)
642 			break;
643 		err = sock_intr_errno(timeo);
644 		if (signal_pending(current))
645 			break;
646 		err = -EAGAIN;
647 		if (!timeo)
648 			break;
649 	}
650 	finish_wait(sk_sleep(sk), &wait);
651 	return err;
652 }
653 
654 /*
655  * This will accept the next outstanding connection.
656  */
657 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
658 {
659 	struct inet_connection_sock *icsk = inet_csk(sk);
660 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
661 	struct request_sock *req;
662 	struct sock *newsk;
663 	int error;
664 
665 	lock_sock(sk);
666 
667 	/* We need to make sure that this socket is listening,
668 	 * and that it has something pending.
669 	 */
670 	error = -EINVAL;
671 	if (sk->sk_state != TCP_LISTEN)
672 		goto out_err;
673 
674 	/* Find already established connection */
675 	if (reqsk_queue_empty(queue)) {
676 		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
677 
678 		/* If this is a non blocking socket don't sleep */
679 		error = -EAGAIN;
680 		if (!timeo)
681 			goto out_err;
682 
683 		error = inet_csk_wait_for_connect(sk, timeo);
684 		if (error)
685 			goto out_err;
686 	}
687 	req = reqsk_queue_remove(queue, sk);
688 	newsk = req->sk;
689 
690 	if (sk->sk_protocol == IPPROTO_TCP &&
691 	    tcp_rsk(req)->tfo_listener) {
692 		spin_lock_bh(&queue->fastopenq.lock);
693 		if (tcp_rsk(req)->tfo_listener) {
694 			/* We are still waiting for the final ACK from 3WHS
695 			 * so can't free req now. Instead, we set req->sk to
696 			 * NULL to signify that the child socket is taken
697 			 * so reqsk_fastopen_remove() will free the req
698 			 * when 3WHS finishes (or is aborted).
699 			 */
700 			req->sk = NULL;
701 			req = NULL;
702 		}
703 		spin_unlock_bh(&queue->fastopenq.lock);
704 	}
705 
706 out:
707 	release_sock(sk);
708 	if (newsk && mem_cgroup_sockets_enabled) {
709 		int amt;
710 
711 		/* atomically get the memory usage, set and charge the
712 		 * newsk->sk_memcg.
713 		 */
714 		lock_sock(newsk);
715 
716 		/* The socket has not been accepted yet, no need to look at
717 		 * newsk->sk_wmem_queued.
718 		 */
719 		amt = sk_mem_pages(newsk->sk_forward_alloc +
720 				   atomic_read(&newsk->sk_rmem_alloc));
721 		mem_cgroup_sk_alloc(newsk);
722 		if (newsk->sk_memcg && amt)
723 			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
724 						GFP_KERNEL | __GFP_NOFAIL);
725 
726 		release_sock(newsk);
727 	}
728 	if (req)
729 		reqsk_put(req);
730 
731 	if (newsk)
732 		inet_init_csk_locks(newsk);
733 
734 	return newsk;
735 out_err:
736 	newsk = NULL;
737 	req = NULL;
738 	*err = error;
739 	goto out;
740 }
741 EXPORT_SYMBOL(inet_csk_accept);
742 
743 /*
744  * Using different timers for retransmit, delayed acks and probes.
745  * We may wish to use just one timer, maintaining a list of expiry jiffies,
746  * to optimize.
747  */
748 void inet_csk_init_xmit_timers(struct sock *sk,
749 			       void (*retransmit_handler)(struct timer_list *t),
750 			       void (*delack_handler)(struct timer_list *t),
751 			       void (*keepalive_handler)(struct timer_list *t))
752 {
753 	struct inet_connection_sock *icsk = inet_csk(sk);
754 
755 	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
756 	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
757 	timer_setup(&sk->sk_timer, keepalive_handler, 0);
758 	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
759 }
760 EXPORT_SYMBOL(inet_csk_init_xmit_timers);
761 
762 void inet_csk_clear_xmit_timers(struct sock *sk)
763 {
764 	struct inet_connection_sock *icsk = inet_csk(sk);
765 
766 	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
767 
768 	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
769 	sk_stop_timer(sk, &icsk->icsk_delack_timer);
770 	sk_stop_timer(sk, &sk->sk_timer);
771 }
772 EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
773 
774 void inet_csk_delete_keepalive_timer(struct sock *sk)
775 {
776 	sk_stop_timer(sk, &sk->sk_timer);
777 }
778 EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
779 
780 void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
781 {
782 	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
783 }
784 EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
785 
786 struct dst_entry *inet_csk_route_req(const struct sock *sk,
787 				     struct flowi4 *fl4,
788 				     const struct request_sock *req)
789 {
790 	const struct inet_request_sock *ireq = inet_rsk(req);
791 	struct net *net = read_pnet(&ireq->ireq_net);
792 	struct ip_options_rcu *opt;
793 	struct rtable *rt;
794 
795 	rcu_read_lock();
796 	opt = rcu_dereference(ireq->ireq_opt);
797 
798 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
799 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
800 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
801 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
802 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
803 			   htons(ireq->ir_num), sk->sk_uid);
804 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
805 	rt = ip_route_output_flow(net, fl4, sk);
806 	if (IS_ERR(rt))
807 		goto no_route;
808 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
809 		goto route_err;
810 	rcu_read_unlock();
811 	return &rt->dst;
812 
813 route_err:
814 	ip_rt_put(rt);
815 no_route:
816 	rcu_read_unlock();
817 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
818 	return NULL;
819 }
820 EXPORT_SYMBOL_GPL(inet_csk_route_req);
821 
822 struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
823 					    struct sock *newsk,
824 					    const struct request_sock *req)
825 {
826 	const struct inet_request_sock *ireq = inet_rsk(req);
827 	struct net *net = read_pnet(&ireq->ireq_net);
828 	struct inet_sock *newinet = inet_sk(newsk);
829 	struct ip_options_rcu *opt;
830 	struct flowi4 *fl4;
831 	struct rtable *rt;
832 
833 	opt = rcu_dereference(ireq->ireq_opt);
834 	fl4 = &newinet->cork.fl.u.ip4;
835 
836 	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
837 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
838 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
839 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
840 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
841 			   htons(ireq->ir_num), sk->sk_uid);
842 	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
843 	rt = ip_route_output_flow(net, fl4, sk);
844 	if (IS_ERR(rt))
845 		goto no_route;
846 	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
847 		goto route_err;
848 	return &rt->dst;
849 
850 route_err:
851 	ip_rt_put(rt);
852 no_route:
853 	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
854 	return NULL;
855 }
856 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
857 
858 /* Decide when to expire the request and when to resend SYN-ACK */
859 static void syn_ack_recalc(struct request_sock *req,
860 			   const int max_syn_ack_retries,
861 			   const u8 rskq_defer_accept,
862 			   int *expire, int *resend)
863 {
864 	if (!rskq_defer_accept) {
865 		*expire = req->num_timeout >= max_syn_ack_retries;
866 		*resend = 1;
867 		return;
868 	}
869 	*expire = req->num_timeout >= max_syn_ack_retries &&
870 		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
871 	/* Do not resend while waiting for data after ACK,
872 	 * start to resend on end of deferring period to give
873 	 * last chance for data or ACK to create established socket.
874 	 */
875 	*resend = !inet_rsk(req)->acked ||
876 		  req->num_timeout >= rskq_defer_accept - 1;
877 }
878 
879 int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
880 {
881 	int err = req->rsk_ops->rtx_syn_ack(parent, req);
882 
883 	if (!err)
884 		req->num_retrans++;
885 	return err;
886 }
887 EXPORT_SYMBOL(inet_rtx_syn_ack);
888 
889 static struct request_sock *inet_reqsk_clone(struct request_sock *req,
890 					     struct sock *sk)
891 {
892 	struct sock *req_sk, *nreq_sk;
893 	struct request_sock *nreq;
894 
895 	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
896 	if (!nreq) {
897 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
898 
899 		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
900 		sock_put(sk);
901 		return NULL;
902 	}
903 
904 	req_sk = req_to_sk(req);
905 	nreq_sk = req_to_sk(nreq);
906 
907 	memcpy(nreq_sk, req_sk,
908 	       offsetof(struct sock, sk_dontcopy_begin));
909 	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
910 	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
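	/* The sk_dontcopy_begin..sk_dontcopy_end window (hash node linkage,
	 * queue mappings, incoming cpu and the refcount) is skipped by the
	 * two copies above; the fields that are still wanted are
	 * re-initialised or copied back explicitly below.
	 */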
911 
912 	sk_node_init(&nreq_sk->sk_node);
913 	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
914 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
915 	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
916 #endif
917 	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
918 
919 	nreq->rsk_listener = sk;
920 
921 	/* We need not acquire fastopenq->lock
922 	 * because the child socket is locked in inet_csk_listen_stop().
923 	 */
924 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
925 		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);
926 
927 	return nreq;
928 }
929 
930 static void reqsk_queue_migrated(struct request_sock_queue *queue,
931 				 const struct request_sock *req)
932 {
933 	if (req->num_timeout == 0)
934 		atomic_inc(&queue->young);
935 	atomic_inc(&queue->qlen);
936 }
937 
938 static void reqsk_migrate_reset(struct request_sock *req)
939 {
940 	req->saved_syn = NULL;
941 #if IS_ENABLED(CONFIG_IPV6)
942 	inet_rsk(req)->ipv6_opt = NULL;
943 	inet_rsk(req)->pktopts = NULL;
944 #else
945 	inet_rsk(req)->ireq_opt = NULL;
946 #endif
947 }
948 
949 /* return true if req was found in the ehash table */
950 static bool reqsk_queue_unlink(struct request_sock *req)
951 {
952 	struct sock *sk = req_to_sk(req);
953 	bool found = false;
954 
955 	if (sk_hashed(sk)) {
956 		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
957 		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);
958 
959 		spin_lock(lock);
960 		found = __sk_nulls_del_node_init_rcu(sk);
961 		spin_unlock(lock);
962 	}
963 	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
964 		reqsk_put(req);
965 	return found;
966 }
967 
968 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
969 {
970 	bool unlinked = reqsk_queue_unlink(req);
971 
972 	if (unlinked) {
973 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
974 		reqsk_put(req);
975 	}
976 	return unlinked;
977 }
978 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
979 
980 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
981 {
982 	inet_csk_reqsk_queue_drop(sk, req);
983 	reqsk_put(req);
984 }
985 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
986 
987 static void reqsk_timer_handler(struct timer_list *t)
988 {
989 	struct request_sock *req = from_timer(req, t, rsk_timer);
990 	struct request_sock *nreq = NULL, *oreq = req;
991 	struct sock *sk_listener = req->rsk_listener;
992 	struct inet_connection_sock *icsk;
993 	struct request_sock_queue *queue;
994 	struct net *net;
995 	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
996 
997 	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
998 		struct sock *nsk;
999 
1000 		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
1001 		if (!nsk)
1002 			goto drop;
1003 
1004 		nreq = inet_reqsk_clone(req, nsk);
1005 		if (!nreq)
1006 			goto drop;
1007 
1008 		/* The new timer for the cloned req can drop the two base references
1009 		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
1010 		 * hold another count to prevent use-after-free and
1011 		 * call reqsk_put() just before return.
1012 		 */
1013 		refcount_set(&nreq->rsk_refcnt, 2 + 1);
1014 		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1015 		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
1016 
1017 		req = nreq;
1018 		sk_listener = nsk;
1019 	}
1020 
1021 	icsk = inet_csk(sk_listener);
1022 	net = sock_net(sk_listener);
1023 	max_syn_ack_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
1024 		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
1025 	/* Normally all the openreqs are young and become mature
1026 	 * (i.e. converted to established socket) for first timeout.
1027 	 * If synack was not acknowledged for 1 second, it means
1028 	 * one of the following things: synack was lost, ack was lost,
1029 	 * rtt is high or nobody planned to ack (i.e. synflood).
1030 	 * When server is a bit loaded, queue is populated with old
1031 	 * open requests, reducing effective size of queue.
1032 	 * When server is well loaded, queue size reduces to zero
1033 	 * after several minutes of work. It is not synflood,
1034 	 * it is normal operation. The solution is pruning
1035 	 * too old entries overriding normal timeout, when
1036 	 * situation becomes dangerous.
1037 	 *
1038 	 * Essentially, we reserve half of the room for young
1039 	 * embryos; and abort old ones without pity, if old
1040 	 * ones are about to clog our table.
1041 	 */
1042 	queue = &icsk->icsk_accept_queue;
1043 	qlen = reqsk_queue_len(queue);
1044 	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
1045 		int young = reqsk_queue_len_young(queue) << 1;
1046 
1047 		while (max_syn_ack_retries > 2) {
1048 			if (qlen < young)
1049 				break;
1050 			max_syn_ack_retries--;
1051 			young <<= 1;
1052 		}
1053 	}
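	/* e.g. with sk_max_ack_backlog == 128, qlen == 100 and 30 young
	 * entries, young starts at 60 and doubles once, so
	 * max_syn_ack_retries drops by one before the recalculation below.
	 */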
1054 	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
1055 		       &expire, &resend);
1056 	req->rsk_ops->syn_ack_timeout(req);
1057 	if (!expire &&
1058 	    (!resend ||
1059 	     !inet_rtx_syn_ack(sk_listener, req) ||
1060 	     inet_rsk(req)->acked)) {
1061 		if (req->num_timeout++ == 0)
1062 			atomic_dec(&queue->young);
1063 		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));
1064 
1065 		if (!nreq)
1066 			return;
1067 
1068 		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
1069 			/* delete timer */
1070 			inet_csk_reqsk_queue_drop(sk_listener, nreq);
1071 			goto no_ownership;
1072 		}
1073 
1074 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
1075 		reqsk_migrate_reset(oreq);
1076 		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
1077 		reqsk_put(oreq);
1078 
1079 		reqsk_put(nreq);
1080 		return;
1081 	}
1082 
1083 	/* Even if we can clone the req, we may not need to retransmit any more
1084 	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
1085 	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
1086 	 */
1087 	if (nreq) {
1088 		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
1089 no_ownership:
1090 		reqsk_migrate_reset(nreq);
1091 		reqsk_queue_removed(queue, nreq);
1092 		__reqsk_free(nreq);
1093 	}
1094 
1095 drop:
1096 	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
1097 }
1098 
1099 static void reqsk_queue_hash_req(struct request_sock *req,
1100 				 unsigned long timeout)
1101 {
1102 	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1103 	mod_timer(&req->rsk_timer, jiffies + timeout);
1104 
1105 	inet_ehash_insert(req_to_sk(req), NULL, NULL);
1106 	/* before letting lookups find us, make sure all req fields
1107 	 * are committed to memory and refcnt initialized.
1108 	 */
1109 	smp_wmb();
1110 	refcount_set(&req->rsk_refcnt, 2 + 1);
1111 }
1112 
1113 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
1114 				   unsigned long timeout)
1115 {
1116 	reqsk_queue_hash_req(req, timeout);
1117 	inet_csk_reqsk_queue_added(sk);
1118 }
1119 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
1120 
1121 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
1122 			   const gfp_t priority)
1123 {
1124 	struct inet_connection_sock *icsk = inet_csk(newsk);
1125 
1126 	if (!icsk->icsk_ulp_ops)
1127 		return;
1128 
1129 	if (icsk->icsk_ulp_ops->clone)
1130 		icsk->icsk_ulp_ops->clone(req, newsk, priority);
1131 }
1132 
1133 /**
1134  *	inet_csk_clone_lock - clone an inet socket, and lock its clone
1135  *	@sk: the socket to clone
1136  *	@req: request_sock
1137  *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
1138  *
1139  *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
1140  */
1141 struct sock *inet_csk_clone_lock(const struct sock *sk,
1142 				 const struct request_sock *req,
1143 				 const gfp_t priority)
1144 {
1145 	struct sock *newsk = sk_clone_lock(sk, priority);
1146 
1147 	if (newsk) {
1148 		struct inet_connection_sock *newicsk = inet_csk(newsk);
1149 
1150 		newsk->sk_wait_pending = 0;
1151 		inet_sk_set_state(newsk, TCP_SYN_RECV);
1152 		newicsk->icsk_bind_hash = NULL;
1153 		newicsk->icsk_bind2_hash = NULL;
1154 
1155 		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
1156 		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
1157 		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);
1158 
1159 		/* listeners have SOCK_RCU_FREE, not the children */
1160 		sock_reset_flag(newsk, SOCK_RCU_FREE);
1161 
1162 		inet_sk(newsk)->mc_list = NULL;
1163 
1164 		newsk->sk_mark = inet_rsk(req)->ir_mark;
1165 		atomic64_set(&newsk->sk_cookie,
1166 			     atomic64_read(&inet_rsk(req)->ir_cookie));
1167 
1168 		newicsk->icsk_retransmits = 0;
1169 		newicsk->icsk_backoff	  = 0;
1170 		newicsk->icsk_probes_out  = 0;
1171 		newicsk->icsk_probes_tstamp = 0;
1172 
1173 		/* Deinitialize accept_queue to trap illegal accesses. */
1174 		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
1175 
1176 		inet_clone_ulp(req, newsk, priority);
1177 
1178 		security_inet_csk_clone(newsk, req);
1179 	}
1180 	return newsk;
1181 }
1182 EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
1183 
1184 /*
1185  * At this point, there should be no process reference to this
1186  * socket, and thus no user references at all.  Therefore we
1187  * can assume the socket waitqueue is inactive and nobody will
1188  * try to jump onto it.
1189  */
1190 void inet_csk_destroy_sock(struct sock *sk)
1191 {
1192 	WARN_ON(sk->sk_state != TCP_CLOSE);
1193 	WARN_ON(!sock_flag(sk, SOCK_DEAD));
1194 
1195 	/* It cannot be in hash table! */
1196 	WARN_ON(!sk_unhashed(sk));
1197 
1198 	/* If inet_sk(sk)->inet_num is not 0, the socket must be bound */
1199 	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
1200 
1201 	sk->sk_prot->destroy(sk);
1202 
1203 	sk_stream_kill_queues(sk);
1204 
1205 	xfrm_sk_free_policy(sk);
1206 
1207 	sk_refcnt_debug_release(sk);
1208 
1209 	this_cpu_dec(*sk->sk_prot->orphan_count);
1210 
1211 	sock_put(sk);
1212 }
1213 EXPORT_SYMBOL(inet_csk_destroy_sock);
1214 
1215 /* This function allows to force a closure of a socket after the call to
1216  * tcp/dccp_create_openreq_child().
1217  */
1218 void inet_csk_prepare_forced_close(struct sock *sk)
1219 	__releases(&sk->sk_lock.slock)
1220 {
1221 	/* sk_clone_lock locked the socket and set refcnt to 2 */
1222 	bh_unlock_sock(sk);
1223 	sock_put(sk);
1224 	inet_csk_prepare_for_destroy_sock(sk);
1225 	inet_sk(sk)->inet_num = 0;
1226 }
1227 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
1228 
1229 static int inet_ulp_can_listen(const struct sock *sk)
1230 {
1231 	const struct inet_connection_sock *icsk = inet_csk(sk);
1232 
1233 	if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
1234 		return -EINVAL;
1235 
1236 	return 0;
1237 }
1238 
1239 int inet_csk_listen_start(struct sock *sk)
1240 {
1241 	struct inet_connection_sock *icsk = inet_csk(sk);
1242 	struct inet_sock *inet = inet_sk(sk);
1243 	int err;
1244 
1245 	err = inet_ulp_can_listen(sk);
1246 	if (unlikely(err))
1247 		return err;
1248 
1249 	reqsk_queue_alloc(&icsk->icsk_accept_queue);
1250 
1251 	sk->sk_ack_backlog = 0;
1252 	inet_csk_delack_init(sk);
1253 
1254 	/* There is race window here: we announce ourselves listening,
1255 	 * but this transition is still not validated by get_port().
1256 	 * It is OK, because this socket enters the hash table only
1257 	 * after validation is complete.
1258 	 */
1259 	inet_sk_state_store(sk, TCP_LISTEN);
1260 	err = sk->sk_prot->get_port(sk, inet->inet_num);
1261 	if (!err) {
1262 		inet->inet_sport = htons(inet->inet_num);
1263 
1264 		sk_dst_reset(sk);
1265 		err = sk->sk_prot->hash(sk);
1266 
1267 		if (likely(!err))
1268 			return 0;
1269 	}
1270 
1271 	inet_sk_set_state(sk, TCP_CLOSE);
1272 	return err;
1273 }
1274 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
1275 
1276 static void inet_child_forget(struct sock *sk, struct request_sock *req,
1277 			      struct sock *child)
1278 {
1279 	sk->sk_prot->disconnect(child, O_NONBLOCK);
1280 
1281 	sock_orphan(child);
1282 
1283 	this_cpu_inc(*sk->sk_prot->orphan_count);
1284 
1285 	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
1286 		BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
1287 		BUG_ON(sk != req->rsk_listener);
1288 
1289 		/* Paranoid, to prevent race condition if
1290 		 * an inbound pkt destined for child is
1291 		 * blocked by sock lock in tcp_v4_rcv().
1292 		 * Also to satisfy an assertion in
1293 		 * tcp_v4_destroy_sock().
1294 		 */
1295 		RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
1296 	}
1297 	inet_csk_destroy_sock(child);
1298 }
1299 
1300 struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
1301 				      struct request_sock *req,
1302 				      struct sock *child)
1303 {
1304 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1305 
1306 	spin_lock(&queue->rskq_lock);
1307 	if (unlikely(sk->sk_state != TCP_LISTEN)) {
1308 		inet_child_forget(sk, req, child);
1309 		child = NULL;
1310 	} else {
1311 		req->sk = child;
1312 		req->dl_next = NULL;
1313 		if (queue->rskq_accept_head == NULL)
1314 			WRITE_ONCE(queue->rskq_accept_head, req);
1315 		else
1316 			queue->rskq_accept_tail->dl_next = req;
1317 		queue->rskq_accept_tail = req;
1318 		sk_acceptq_added(sk);
1319 	}
1320 	spin_unlock(&queue->rskq_lock);
1321 	return child;
1322 }
1323 EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
1324 
1325 struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
1326 					 struct request_sock *req, bool own_req)
1327 {
1328 	if (own_req) {
1329 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
1330 		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
1331 
1332 		if (sk != req->rsk_listener) {
1333 			/* another listening sk has been selected,
1334 			 * migrate the req to it.
1335 			 */
1336 			struct request_sock *nreq;
1337 
1338 			/* hold a refcnt for the nreq->rsk_listener
1339 			 * which is assigned in inet_reqsk_clone()
1340 			 */
1341 			sock_hold(sk);
1342 			nreq = inet_reqsk_clone(req, sk);
1343 			if (!nreq) {
1344 				inet_child_forget(sk, req, child);
1345 				goto child_put;
1346 			}
1347 
1348 			refcount_set(&nreq->rsk_refcnt, 1);
1349 			if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
1350 				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
1351 				reqsk_migrate_reset(req);
1352 				reqsk_put(req);
1353 				return child;
1354 			}
1355 
1356 			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
1357 			reqsk_migrate_reset(nreq);
1358 			__reqsk_free(nreq);
1359 		} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
1360 			return child;
1361 		}
1362 	}
1363 	/* Too bad, another child took ownership of the request, undo. */
1364 child_put:
1365 	bh_unlock_sock(child);
1366 	sock_put(child);
1367 	return NULL;
1368 }
1369 EXPORT_SYMBOL(inet_csk_complete_hashdance);
1370 
1371 /*
1372  *	This routine closes sockets which have been at least partially
1373  *	opened, but not yet accepted.
1374  */
1375 void inet_csk_listen_stop(struct sock *sk)
1376 {
1377 	struct inet_connection_sock *icsk = inet_csk(sk);
1378 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
1379 	struct request_sock *next, *req;
1380 
1381 	/* Following specs, it would be better either to send FIN
1382 	 * (and enter FIN-WAIT-1, it is normal close)
1383 	 * or to send active reset (abort).
1384 	 * Certainly, it is pretty dangerous while synflood, but it is
1385 	 * bad justification for our negligence 8)
1386 	 * To be honest, we are not able to make either
1387 	 * of the variants now.			--ANK
1388 	 */
1389 	while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
1390 		struct sock *child = req->sk, *nsk;
1391 		struct request_sock *nreq;
1392 
1393 		local_bh_disable();
1394 		bh_lock_sock(child);
1395 		WARN_ON(sock_owned_by_user(child));
1396 		sock_hold(child);
1397 
1398 		nsk = reuseport_migrate_sock(sk, child, NULL);
1399 		if (nsk) {
1400 			nreq = inet_reqsk_clone(req, nsk);
1401 			if (nreq) {
1402 				refcount_set(&nreq->rsk_refcnt, 1);
1403 
1404 				if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
1405 					__NET_INC_STATS(sock_net(nsk),
1406 							LINUX_MIB_TCPMIGRATEREQSUCCESS);
1407 					reqsk_migrate_reset(req);
1408 				} else {
1409 					__NET_INC_STATS(sock_net(nsk),
1410 							LINUX_MIB_TCPMIGRATEREQFAILURE);
1411 					reqsk_migrate_reset(nreq);
1412 					__reqsk_free(nreq);
1413 				}
1414 
1415 				/* inet_csk_reqsk_queue_add() has already
1416 				 * called inet_child_forget() on failure case.
1417 				 */
1418 				goto skip_child_forget;
1419 			}
1420 		}
1421 
1422 		inet_child_forget(sk, req, child);
1423 skip_child_forget:
1424 		reqsk_put(req);
1425 		bh_unlock_sock(child);
1426 		local_bh_enable();
1427 		sock_put(child);
1428 
1429 		cond_resched();
1430 	}
1431 	if (queue->fastopenq.rskq_rst_head) {
1432 		/* Free all the reqs queued in rskq_rst_head. */
1433 		spin_lock_bh(&queue->fastopenq.lock);
1434 		req = queue->fastopenq.rskq_rst_head;
1435 		queue->fastopenq.rskq_rst_head = NULL;
1436 		spin_unlock_bh(&queue->fastopenq.lock);
1437 		while (req != NULL) {
1438 			next = req->dl_next;
1439 			reqsk_put(req);
1440 			req = next;
1441 		}
1442 	}
1443 	WARN_ON_ONCE(sk->sk_ack_backlog);
1444 }
1445 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
1446 
1447 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
1448 {
1449 	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
1450 	const struct inet_sock *inet = inet_sk(sk);
1451 
1452 	sin->sin_family		= AF_INET;
1453 	sin->sin_addr.s_addr	= inet->inet_daddr;
1454 	sin->sin_port		= inet->inet_dport;
1455 }
1456 EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
1457 
1458 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
1459 {
1460 	const struct inet_sock *inet = inet_sk(sk);
1461 	const struct ip_options_rcu *inet_opt;
1462 	__be32 daddr = inet->inet_daddr;
1463 	struct flowi4 *fl4;
1464 	struct rtable *rt;
1465 
1466 	rcu_read_lock();
1467 	inet_opt = rcu_dereference(inet->inet_opt);
1468 	if (inet_opt && inet_opt->opt.srr)
1469 		daddr = inet_opt->opt.faddr;
1470 	fl4 = &fl->u.ip4;
1471 	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
1472 				   inet->inet_saddr, inet->inet_dport,
1473 				   inet->inet_sport, sk->sk_protocol,
1474 				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1475 	if (IS_ERR(rt))
1476 		rt = NULL;
1477 	if (rt)
1478 		sk_setup_caps(sk, &rt->dst);
1479 	rcu_read_unlock();
1480 
1481 	return &rt->dst;
1482 }
1483 
1484 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
1485 {
1486 	struct dst_entry *dst = __sk_dst_check(sk, 0);
1487 	struct inet_sock *inet = inet_sk(sk);
1488 
1489 	if (!dst) {
1490 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1491 		if (!dst)
1492 			goto out;
1493 	}
1494 	dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
1495 
1496 	dst = __sk_dst_check(sk, 0);
1497 	if (!dst)
1498 		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
1499 out:
1500 	return dst;
1501 }
1502 EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
1503