/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 * Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#ifndef _INET_HASHTABLES_H
#define _INET_HASHTABLES_H


#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#include <net/inet_connection_sock.h>
#include <net/inet_sock.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/netns/hash.h>

#include <linux/refcount.h>
#include <asm/byteorder.h>

/* This is for all connections with a full identity, no wildcards.
 * The 'e' prefix stands for Established, but we really put all sockets
 * except LISTEN ones here.
 */
struct inet_ehash_bucket {
	struct hlist_nulls_head chain;
};

/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->sk_reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit: if it is set and the socket trying to bind has
 * sk->sk_reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 * (An illustrative sketch of this check follows the struct definition
 * below.)
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (i.e. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an FTP server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
#define FASTREUSEPORT_ANY	1
#define FASTREUSEPORT_STRICT	2

struct inet_bind_bucket {
	possible_net_t		ib_net;
	unsigned short		port;
	signed char		fastreuse;
	signed char		fastreuseport;
	kuid_t			fastuid;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		fast_v6_rcv_saddr;
#endif
	__be32			fast_rcv_saddr;
	unsigned short		fast_sk_family;
	bool			fast_ipv6_only;
	struct hlist_node	node;
	struct hlist_head	owners;
};

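/* A minimal, illustrative sketch (not part of this header's API) of the
 * fastreuse rule described in the comment above: the flag stays set only
 * while every owner of the bucket allows address reuse and is not
 * listening.  The real update logic, which also tracks SO_REUSEPORT via
 * the fastreuseport/fastuid fields, lives in inet_csk_get_port() and its
 * helpers; the function name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static void example_update_fastreuse(struct inet_bind_bucket *tb,
				     struct sock *newsk)
{
	bool reuse_ok = newsk->sk_reuse && newsk->sk_state != TCP_LISTEN;

	if (hlist_empty(&tb->owners))
		tb->fastreuse = reuse_ok;
	else if (tb->fastreuse && !reuse_ok)
		tb->fastreuse = 0;
}
#endif
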
static inline struct net *ib_net(struct inet_bind_bucket *ib)
{
	return read_pnet(&ib->ib_net);
}

#define inet_bind_bucket_for_each(tb, head) \
	hlist_for_each_entry(tb, head, node)

struct inet_bind_hashbucket {
	spinlock_t		lock;
	struct hlist_head	chain;
};

/* Sockets can be hashed in the established or listening table.
 * We must use a different 'nulls' end-of-chain value for each hash bucket:
 * a socket might transition from ESTABLISHED to LISTEN state without an
 * RCU grace period, and a lookup in the ehash table needs to handle this
 * case.  (An illustrative lookup sketch follows the definitions below.)
 */
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
	spinlock_t		lock;
	union {
		struct hlist_head	head;
		struct hlist_nulls_head	nulls_head;
	};
};

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */

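/* A minimal, illustrative sketch (not part of this header's API) of why the
 * 'nulls' end-of-chain values matter for the lockless lookups described
 * above: an RCU walk checks, on reaching the end of a chain, that the nulls
 * marker still identifies the slot it started from.  If the socket moved to
 * another chain (e.g. ESTABLISHED -> LISTEN) during the walk, the marker
 * differs and the walk restarts.  This mirrors the restart pattern used by
 * __inet_lookup_established(); the function name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static struct sock *example_nulls_lookup(struct inet_ehash_bucket *head,
					 unsigned int slot)
{
	const struct hlist_nulls_node *node;
	struct sock *sk;

begin:
	sk_nulls_for_each_rcu(sk, node, &head->chain) {
		/* ... compare keys, return sk on a match ... */
	}
	/* Chain ended on an unexpected nulls value: the socket may have
	 * been moved to another chain, so restart the walk.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;
	return NULL;
}
#endif
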
struct inet_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
	 *
	 */
	struct inet_ehash_bucket	*ehash;
	spinlock_t			*ehash_locks;
	unsigned int			ehash_mask;
	unsigned int			ehash_locks_mask;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct inet_bind_hashbucket	*bhash;

	unsigned int			bhash_size;
	/* 4 bytes hole on 64 bit */

	struct kmem_cache		*bind_bucket_cachep;

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * might be often dirty.
	 */
	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct inet_listen_hashbucket	listening_hash[INET_LHTABLE_SIZE]
					____cacheline_aligned_in_smp;
};

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash[hash & hashinfo->ehash_mask];
}

static inline spinlock_t *inet_ehash_lockp(
	struct inet_hashinfo *hashinfo,
	unsigned int hash)
{
	return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask];
}

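/* A minimal, illustrative sketch (not part of this header's API) of how the
 * two helpers above pair up when a chain is modified: the same hash selects
 * both the bucket and the lock that protects it.  This loosely mirrors
 * inet_ehash_insert(); the function name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static void example_ehash_add(struct inet_hashinfo *hashinfo, struct sock *sk)
{
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);

	spin_lock(lock);
	__sk_nulls_add_node_rcu(sk, &head->chain);
	spin_unlock(lock);
}
#endif
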
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo);

static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
{
	kvfree(hashinfo->ehash_locks);
	hashinfo->ehash_locks = NULL;
}

struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
			struct inet_bind_hashbucket *head,
			const unsigned short snum);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
			      struct inet_bind_bucket *tb);

static inline u32 inet_bhashfn(const struct net *net, const __u16 lport,
			       const u32 bhash_size)
{
	return (lport + net_hash_mix(net)) & (bhash_size - 1);
}

void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
		    const unsigned short snum);

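/* A minimal, illustrative sketch (not part of this header's API) of how a
 * bind bucket is located for a local port: inet_bhashfn() picks the bhash
 * slot and the per-bucket chain is then searched for a matching
 * (netns, port) pair.  The real walk, done under head->lock, lives in
 * inet_csk_get_port(); the function name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static struct inet_bind_bucket *example_find_bind_bucket(struct inet_hashinfo *hinfo,
							  struct net *net, u16 port)
{
	struct inet_bind_hashbucket *head =
		&hinfo->bhash[inet_bhashfn(net, port, hinfo->bhash_size)];
	struct inet_bind_bucket *tb;

	/* Caller is assumed to hold head->lock. */
	inet_bind_bucket_for_each(tb, &head->chain)
		if (net_eq(ib_net(tb), net) && tb->port == port)
			return tb;
	return NULL;
}
#endif
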
/* These can have wildcards, don't try too hard. */
static inline u32 inet_lhashfn(const struct net *net, const unsigned short num)
{
	return (num + net_hash_mix(net)) & (INET_LHTABLE_SIZE - 1);
}

static inline int inet_sk_listen_hashfn(const struct sock *sk)
{
	return inet_lhashfn(sock_net(sk), inet_sk(sk)->inet_num);
}

/* Caller must disable local BH processing. */
int __inet_inherit_port(const struct sock *sk, struct sock *child);

void inet_put_port(struct sock *sk);

void inet_hashinfo_init(struct inet_hashinfo *h);

bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);

struct sock *__inet_lookup_listener(struct net *net,
				    struct inet_hashinfo *hashinfo,
				    struct sk_buff *skb, int doff,
				    const __be32 saddr, const __be16 sport,
				    const __be32 daddr,
				    const unsigned short hnum,
				    const int dif, const int sdif);

static inline struct sock *inet_lookup_listener(struct net *net,
		struct inet_hashinfo *hashinfo,
		struct sk_buff *skb, int doff,
		__be32 saddr, __be16 sport,
		__be32 daddr, __be16 dport, int dif, int sdif)
{
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport,
				      daddr, ntohs(dport), dif, sdif);
}

/* Socket demux engine toys. */
/* What happens here is ugly; there's a pair of adjacent fields in
   struct inet_sock; __be16 dport followed by __u16 num.  We want to
   search by pair, so we combine the keys into a single 32bit value
   and compare with 32bit value read from &...->dport.  Let's at least
   make sure that it's not mixed with anything else...
   On 64bit targets we combine comparisons with pair of adjacent __be32
   fields in the same way.
*/
#ifdef __BIG_ENDIAN
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__force __u32)(__be16)(__sport) << 16) | (__u32)(__dport)))
#else /* __LITTLE_ENDIAN */
#define INET_COMBINED_PORTS(__sport, __dport) \
	((__force __portpair)(((__u32)(__dport) << 16) | (__force __u32)(__be16)(__sport)))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__saddr)) << 32) | \
				   ((__force __u64)(__be32)(__daddr)))
#else /* __LITTLE_ENDIAN */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const __addrpair __name = (__force __addrpair) ( \
				   (((__force __u64)(__be32)(__daddr)) << 32) | \
				   ((__force __u64)(__be32)(__saddr)))
#endif /* __BIG_ENDIAN */
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))			&&	\
	 ((__sk)->sk_addrpair == (__cookie))			&&	\
	 (!(__sk)->sk_bound_dev_if	||				\
	   ((__sk)->sk_bound_dev_if == (__dif))			||	\
	   ((__sk)->sk_bound_dev_if == (__sdif)))		&&	\
	 net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
	const int __name __deprecated __attribute__((unused))

#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
	(((__sk)->sk_portpair == (__ports))		&&		\
	 ((__sk)->sk_daddr	== (__saddr))		&&		\
	 ((__sk)->sk_rcv_saddr	== (__daddr))		&&		\
	 (!(__sk)->sk_bound_dev_if	||				\
	   ((__sk)->sk_bound_dev_if == (__dif))		||		\
	   ((__sk)->sk_bound_dev_if == (__sdif)))	&&		\
	 net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */

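/* A minimal, illustrative sketch (not part of this header's API) of the
 * demux comparison built from the macros above: addresses are folded into
 * one cookie and ports into one 32bit value, so the hot loop compares two
 * words (plus device and netns) instead of four separate fields.  This
 * loosely mirrors the loop in __inet_lookup_established(); the function
 * name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static struct sock *example_demux(struct net *net, struct hlist_nulls_head *chain,
				  __be32 saddr, __be16 sport,
				  __be32 daddr, u16 hnum, int dif, int sdif)
{
	INET_ADDR_COOKIE(acookie, saddr, daddr);
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct sock *sk;

	sk_nulls_for_each_rcu(sk, node, chain)
		if (INET_MATCH(sk, net, acookie, saddr, daddr, ports, dif, sdif))
			return sk;
	return NULL;
}
#endif
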
/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
 * not check it for lookups anymore, thanks Alexey. -DaveM
 */
struct sock *__inet_lookup_established(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const u16 hnum,
				       const int dif, const int sdif);

static inline struct sock *
	inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo,
				const __be32 saddr, const __be16 sport,
				const __be32 daddr, const __be16 dport,
				const int dif)
{
	return __inet_lookup_established(net, hashinfo, saddr, sport, daddr,
					 ntohs(dport), dif, 0);
}

/* Look up the established table first (a hit holds a socket reference),
 * then fall back to the listening table (RCU protected, not refcounted),
 * reporting which case applied through *refcounted.
 */
static inline struct sock *__inet_lookup(struct net *net,
					 struct inet_hashinfo *hashinfo,
					 struct sk_buff *skb, int doff,
					 const __be32 saddr, const __be16 sport,
					 const __be32 daddr, const __be16 dport,
					 const int dif, const int sdif,
					 bool *refcounted)
{
	u16 hnum = ntohs(dport);
	struct sock *sk;

	sk = __inet_lookup_established(net, hashinfo, saddr, sport,
				       daddr, hnum, dif, sdif);
	*refcounted = true;
	if (sk)
		return sk;
	*refcounted = false;
	return __inet_lookup_listener(net, hashinfo, skb, doff, saddr,
				      sport, daddr, hnum, dif, sdif);
}

static inline struct sock *inet_lookup(struct net *net,
				       struct inet_hashinfo *hashinfo,
				       struct sk_buff *skb, int doff,
				       const __be32 saddr, const __be16 sport,
				       const __be32 daddr, const __be16 dport,
				       const int dif)
{
	struct sock *sk;
	bool refcounted;

	sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
			   dport, dif, 0, &refcounted);

	if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}

static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
					     struct sk_buff *skb,
					     int doff,
					     const __be16 sport,
					     const __be16 dport,
					     const int sdif,
					     bool *refcounted)
{
	struct sock *sk = skb_steal_sock(skb);
	const struct iphdr *iph = ip_hdr(skb);

	*refcounted = true;
	if (sk)
		return sk;

	return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb,
			     doff, iph->saddr, sport,
			     iph->daddr, dport, inet_iif(skb), sdif,
			     refcounted);
}

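/* A minimal, illustrative sketch (not part of this header's API) of how a
 * receive path might use __inet_lookup_skb(): the caller drops the socket
 * reference with sock_put() only when *refcounted came back true.  This
 * loosely mirrors tcp_v4_rcv(); the function name below is hypothetical and
 * tcp_hdr()/__tcp_hdrlen() assume <linux/tcp.h>.
 */
#if 0	/* illustration only, not compiled */
static void example_rx_lookup(struct inet_hashinfo *hinfo, struct sk_buff *skb,
			      int sdif)
{
	const struct tcphdr *th = tcp_hdr(skb);
	bool refcounted;
	struct sock *sk;

	sk = __inet_lookup_skb(hinfo, skb, __tcp_hdrlen(th),
			       th->source, th->dest, sdif, &refcounted);
	if (!sk)
		return;

	/* ... deliver skb to sk ... */

	if (refcounted)
		sock_put(sk);
}
#endif
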
u32 inet6_ehashfn(const struct net *net,
		  const struct in6_addr *laddr, const u16 lport,
		  const struct in6_addr *faddr, const __be16 fport);

static inline void sk_daddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_daddr = addr; /* alias of inet_daddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_daddr);
#endif
}

static inline void sk_rcv_saddr_set(struct sock *sk, __be32 addr)
{
	sk->sk_rcv_saddr = addr; /* alias of inet_rcv_saddr */
#if IS_ENABLED(CONFIG_IPV6)
	ipv6_addr_set_v4mapped(addr, &sk->sk_v6_rcv_saddr);
#endif
}

int __inet_hash_connect(struct inet_timewait_death_row *death_row,
			struct sock *sk, u32 port_offset,
			int (*check_established)(struct inet_timewait_death_row *,
						 struct sock *, __u16,
						 struct inet_timewait_sock **));

int inet_hash_connect(struct inet_timewait_death_row *death_row,
		      struct sock *sk);
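
/* A minimal, illustrative sketch (not part of this header's API) of the
 * connect-time call: inet_hash_connect() picks an ephemeral port, using the
 * death_row's timewait state in its check_established callback to decide
 * whether a 4-tuple can be reused.  This loosely mirrors tcp_v4_connect()
 * and assumes the global tcp_death_row declared in <net/tcp.h>; the function
 * name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static int example_connect_hash(struct sock *sk)
{
	return inet_hash_connect(&tcp_death_row, sk);
}
#endif
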
#endif /* _INET_HASHTABLES_H */