// SPDX-License-Identifier: GPL-2.0-only
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/siphash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>
#include <trace/hooks/net.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
#include <net/ip.h>

#include "nf_internals.h"

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

struct conntrack_gc_work {
	struct delayed_work	dwork;
	u32			next_bucket;
	bool			exiting;
	bool			early_drop;
};

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;

#define GC_SCAN_INTERVAL	(120u * HZ)
#define GC_SCAN_MAX_DURATION	msecs_to_jiffies(10)

static struct conntrack_gc_work conntrack_gc_work;

void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
	/* 1) Acquire the lock */
	spin_lock(lock);

	/* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
	 * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
	 */
	if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
		return;

	/* fast path failed, unlock */
	spin_unlock(lock);

	/* Slow path 1) get global lock */
	spin_lock(&nf_conntrack_locks_all_lock);

	/* Slow path 2) get the lock we want */
	spin_lock(lock);

	/* Slow path 3) release the global lock */
	spin_unlock(&nf_conntrack_locks_all_lock);
}
EXPORT_SYMBOL_GPL(nf_conntrack_lock);

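/* Illustrative per-bucket usage (a sketch, not from this file): hash
 * the tuple, take the matching bucket lock via nf_conntrack_lock(),
 * and release it with a plain spin_unlock():
 *
 *	unsigned int b = hash % CONNTRACK_LOCKS;
 *
 *	nf_conntrack_lock(&nf_conntrack_locks[b]);
 *	... mutate the chains covered by bucket lock b ...
 *	spin_unlock(&nf_conntrack_locks[b]);
 */
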
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	spin_unlock(&nf_conntrack_locks[h1]);
	if (h1 != h2)
		spin_unlock(&nf_conntrack_locks[h2]);
}

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
				     unsigned int h2, unsigned int sequence)
{
	h1 %= CONNTRACK_LOCKS;
	h2 %= CONNTRACK_LOCKS;
	if (h1 <= h2) {
		nf_conntrack_lock(&nf_conntrack_locks[h1]);
		if (h1 != h2)
			spin_lock_nested(&nf_conntrack_locks[h2],
					 SINGLE_DEPTH_NESTING);
	} else {
		nf_conntrack_lock(&nf_conntrack_locks[h2]);
		spin_lock_nested(&nf_conntrack_locks[h1],
				 SINGLE_DEPTH_NESTING);
	}
	if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
		nf_conntrack_double_unlock(h1, h2);
		return true;
	}
	return false;
}

static void nf_conntrack_all_lock(void)
{
	int i;

	spin_lock(&nf_conntrack_locks_all_lock);

	nf_conntrack_locks_all = true;

	for (i = 0; i < CONNTRACK_LOCKS; i++) {
		spin_lock(&nf_conntrack_locks[i]);

		/* This spin_unlock provides the "release" to ensure that
		 * nf_conntrack_locks_all==true is visible to everyone that
		 * acquired spin_lock(&nf_conntrack_locks[]).
		 */
		spin_unlock(&nf_conntrack_locks[i]);
	}
}

static void nf_conntrack_all_unlock(void)
{
	/* All prior stores must be complete before we clear
	 * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
	 * might observe the false value but not the entire
	 * critical section.
	 * It pairs with the smp_load_acquire() in nf_conntrack_lock()
	 */
	smp_store_release(&nf_conntrack_locks_all, false);
	spin_unlock(&nf_conntrack_locks_all_lock);
}

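/* nf_conntrack_all_lock() and nf_conntrack_all_unlock() bracket
 * whole-table operations; per-bucket holders racing with them fall
 * back to the slow path in nf_conntrack_lock() above. The hash resize
 * code (outside this excerpt) is the typical user of this pair.
 */
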
unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_t nf_conntrack_generation __read_mostly;
static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
			      const struct net *net)
{
	unsigned int n;
	u32 seed;

	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

	/* The direction must be ignored, so we hash everything up to the
	 * destination ports (which is a multiple of 4) and treat the last
	 * three bytes manually.
	 */
	seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
	return jhash2((u32 *)tuple, n, seed ^
		      (((__force __u16)tuple->dst.u.all << 16) |
		      tuple->dst.protonum));
}

static u32 scale_hash(u32 hash)
{
	return reciprocal_scale(hash, nf_conntrack_htable_size);
}

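/* reciprocal_scale(x, size) computes (u64)x * size >> 32, mapping a
 * u32 hash uniformly onto [0, size) without a division; this is why
 * the table size does not have to be a power of two.
 */
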
static u32 __hash_conntrack(const struct net *net,
			    const struct nf_conntrack_tuple *tuple,
			    unsigned int size)
{
	return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

static u32 hash_conntrack(const struct net *net,
			  const struct nf_conntrack_tuple *tuple)
{
	return scale_hash(hash_conntrack_raw(tuple, net));
}

static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
				  unsigned int dataoff,
				  struct nf_conntrack_tuple *tuple)
{
	struct {
		__be16 sport;
		__be16 dport;
	} _inet_hdr, *inet_hdr;

	/* Actually only need first 4 bytes to get ports. */
	inet_hdr = skb_header_pointer(skb, dataoff, sizeof(_inet_hdr), &_inet_hdr);
	if (!inet_hdr)
		return false;

	tuple->src.u.udp.port = inet_hdr->sport;
	tuple->dst.u.udp.port = inet_hdr->dport;
	return true;
}

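/* TCP, UDP, UDP-Lite, SCTP and DCCP headers all begin with 16-bit
 * source and destination ports, so the helper above serves every
 * port-based protocol dispatched from nf_ct_get_tuple() below.
 */
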
static bool
nf_ct_get_tuple(const struct sk_buff *skb,
		unsigned int nhoff,
		unsigned int dataoff,
		u_int16_t l3num,
		u_int8_t protonum,
		struct net *net,
		struct nf_conntrack_tuple *tuple)
{
	unsigned int size;
	const __be32 *ap;
	__be32 _addrs[8];

	memset(tuple, 0, sizeof(*tuple));

	tuple->src.l3num = l3num;
	switch (l3num) {
	case NFPROTO_IPV4:
		nhoff += offsetof(struct iphdr, saddr);
		size = 2 * sizeof(__be32);
		break;
	case NFPROTO_IPV6:
		nhoff += offsetof(struct ipv6hdr, saddr);
		size = sizeof(_addrs);
		break;
	default:
		return true;
	}

	ap = skb_header_pointer(skb, nhoff, size, _addrs);
	if (!ap)
		return false;

	switch (l3num) {
	case NFPROTO_IPV4:
		tuple->src.u3.ip = ap[0];
		tuple->dst.u3.ip = ap[1];
		break;
	case NFPROTO_IPV6:
		memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
		memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
		break;
	}

	tuple->dst.protonum = protonum;
	tuple->dst.dir = IP_CT_DIR_ORIGINAL;

	switch (protonum) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return icmpv6_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
	case IPPROTO_ICMP:
		return icmp_pkt_to_tuple(skb, dataoff, net, tuple);
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		return gre_pkt_to_tuple(skb, dataoff, net, tuple);
#endif
	case IPPROTO_TCP:
	case IPPROTO_UDP: /* fallthrough */
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	case IPPROTO_UDPLITE:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
	case IPPROTO_SCTP:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
	case IPPROTO_DCCP:
		return nf_ct_get_tuple_ports(skb, dataoff, tuple);
#endif
	default:
		break;
	}

	return true;
}

static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u_int8_t *protonum)
{
	int dataoff = -1;
	const struct iphdr *iph;
	struct iphdr _iph;

	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
	if (!iph)
		return -1;

	/* Conntrack defragments packets; we might still see fragments
	 * inside ICMP packets, though.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		return -1;

	dataoff = nhoff + (iph->ihl << 2);
	*protonum = iph->protocol;

	/* Check bogus IP headers */
	if (dataoff > skb->len) {
		pr_debug("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
			 nhoff, iph->ihl << 2, skb->len);
		return -1;
	}
	return dataoff;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    u8 *protonum)
{
	int protoff = -1;
	unsigned int extoff = nhoff + sizeof(struct ipv6hdr);
	__be16 frag_off;
	u8 nexthdr;

	if (skb_copy_bits(skb, nhoff + offsetof(struct ipv6hdr, nexthdr),
			  &nexthdr, sizeof(nexthdr)) != 0) {
		pr_debug("can't get nexthdr\n");
		return -1;
	}
	protoff = ipv6_skip_exthdr(skb, extoff, &nexthdr, &frag_off);
	/*
	 * (protoff == skb->len) means the packet has no data, just
	 * the IPv6 header and possibly extension headers, but it is
	 * tracked anyway
	 */
	if (protoff < 0 || (frag_off & htons(~0x7)) != 0) {
		pr_debug("can't find proto in pkt\n");
		return -1;
	}

	*protonum = nexthdr;
	return protoff;
}
#endif

static int get_l4proto(const struct sk_buff *skb,
		       unsigned int nhoff, u8 pf, u8 *l4num)
{
	switch (pf) {
	case NFPROTO_IPV4:
		return ipv4_get_l4proto(skb, nhoff, l4num);
#if IS_ENABLED(CONFIG_IPV6)
	case NFPROTO_IPV6:
		return ipv6_get_l4proto(skb, nhoff, l4num);
#endif
	default:
		*l4num = 0;
		break;
	}
	return -1;
}

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
		       u_int16_t l3num,
		       struct net *net, struct nf_conntrack_tuple *tuple)
{
	u8 protonum;
	int protoff;

	protoff = get_l4proto(skb, nhoff, l3num, &protonum);
	if (protoff <= 0)
		return false;

	return nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple);
}
EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);

bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
		   const struct nf_conntrack_tuple *orig)
{
	memset(inverse, 0, sizeof(*inverse));

	inverse->src.l3num = orig->src.l3num;

	switch (orig->src.l3num) {
	case NFPROTO_IPV4:
		inverse->src.u3.ip = orig->dst.u3.ip;
		inverse->dst.u3.ip = orig->src.u3.ip;
		break;
	case NFPROTO_IPV6:
		inverse->src.u3.in6 = orig->dst.u3.in6;
		inverse->dst.u3.in6 = orig->src.u3.in6;
		break;
	default:
		break;
	}

	inverse->dst.dir = !orig->dst.dir;

	inverse->dst.protonum = orig->dst.protonum;

	switch (orig->dst.protonum) {
	case IPPROTO_ICMP:
		return nf_conntrack_invert_icmp_tuple(inverse, orig);
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return nf_conntrack_invert_icmpv6_tuple(inverse, orig);
#endif
	}

	inverse->src.u.all = orig->dst.u.all;
	inverse->dst.u.all = orig->src.u.all;
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);

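/* Example: an original TCP tuple 10.0.0.1:1234 -> 10.0.0.2:80 inverts
 * to 10.0.0.2:80 -> 10.0.0.1:1234. ICMP and ICMPv6 go through the
 * protocol-specific helpers above because id/type/code fields do not
 * simply swap.
 */
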
/* Generate an almost-unique pseudo-id for a given conntrack.
 *
 * Intentionally doesn't re-use any of the seeds used for hash
 * table location; we assume the id gets exposed to userspace.
 *
 * The following nf_conn items do not change throughout the lifetime
 * of the nf_conn:
 *
 * 1. nf_conn address
 * 2. nf_conn->master address (normally NULL)
 * 3. the associated net namespace
 * 4. the original direction tuple
 */
u32 nf_ct_get_id(const struct nf_conn *ct)
{
	static __read_mostly siphash_key_t ct_id_seed;
	unsigned long a, b, c, d;

	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));

	a = (unsigned long)ct;
	b = (unsigned long)ct->master;
	c = (unsigned long)nf_ct_net(ct);
	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
				   &ct_id_seed);
#ifdef CONFIG_64BIT
	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
#else
	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_get_id);

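/* Note: the conntrack slab is SLAB_TYPESAFE_BY_RCU, so the nf_conn
 * address alone can be recycled for a new flow; mixing the original
 * direction tuple into the siphash above keeps recycled objects from
 * reporting the same id.
 */
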
static void
clean_from_lists(struct nf_conn *ct)
{
	pr_debug("clean_from_lists(%p)\n", ct);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

	/* Destroy all pending expectations */
	nf_ct_remove_expectations(ct);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) dying list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->dying);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* add this conntrack to the (per cpu) unconfirmed list */
	ct->cpu = smp_processor_id();
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			     &pcpu->unconfirmed);
	spin_unlock(&pcpu->lock);
}

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
	struct ct_pcpu *pcpu;

	/* We overload the first tuple to link into the unconfirmed or dying list. */
	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

	spin_lock(&pcpu->lock);
	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
	spin_unlock(&pcpu->lock);
}

#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
				 const struct nf_conntrack_zone *zone,
				 gfp_t flags)
{
	struct nf_conn *tmpl, *p;

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
		if (!tmpl)
			return NULL;

		p = tmpl;
		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
		if (tmpl != p) {
			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
		}
	} else {
		tmpl = kzalloc(sizeof(*tmpl), flags);
		if (!tmpl)
			return NULL;
	}

	tmpl->status = IPS_TEMPLATE;
	write_pnet(&tmpl->ct_net, net);
	nf_ct_zone_add(tmpl, zone);
	atomic_set(&tmpl->ct_general.use, 0);

	return tmpl;
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

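/* Why the manual alignment above: skb->_nfct carries the ctinfo in the
 * low bits of the nf_conn pointer (mask NFCT_INFOMASK), so a template
 * obtained from plain kmalloc() must sit on an (NFCT_INFOMASK + 1)-byte
 * boundary; tmpl_padto records the shift so nf_ct_tmpl_free() can
 * recover the original allocation.
 */
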
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
	nf_ct_ext_destroy(tmpl);
	nf_ct_ext_free(tmpl);

	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
	else
		kfree(tmpl);
}
EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);

static void destroy_gre_conntrack(struct nf_conn *ct)
{
#ifdef CONFIG_NF_CT_PROTO_GRE
	struct nf_conn *master = ct->master;

	if (master)
		nf_ct_gre_keymap_destroy(master);
#endif
}

static void
destroy_conntrack(struct nf_conntrack *nfct)
{
	struct nf_conn *ct = (struct nf_conn *)nfct;

	pr_debug("destroy_conntrack(%p)\n", ct);
	WARN_ON(atomic_read(&nfct->use) != 0);

	if (unlikely(nf_ct_is_template(ct))) {
		nf_ct_tmpl_free(ct);
		return;
	}

	if (unlikely(nf_ct_protonum(ct) == IPPROTO_GRE))
		destroy_gre_conntrack(ct);

	local_bh_disable();
	/* Expectations will have been removed in clean_from_lists,
	 * except TFTP can create an expectation on the first packet,
	 * before connection is in the list, so we need to clean here,
	 * too.
	 */
	nf_ct_remove_expectations(ct);

	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	local_bh_enable();

	if (ct->master)
		nf_ct_put(ct->master);

	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
	nf_conntrack_free(ct);
}

static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	unsigned int sequence;

	nf_ct_helper_destroy(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	clean_from_lists(ct);
	nf_conntrack_double_unlock(hash, reply_hash);

	nf_ct_add_to_dying_list(ct);

	local_bh_enable();
}

bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
	struct nf_conn_tstamp *tstamp;

	if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
		return false;

	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp) {
		s32 timeout = ct->timeout - nfct_time_stamp;

		tstamp->stop = ktime_get_real_ns();
		if (timeout < 0)
			tstamp->stop -= jiffies_to_nsecs(-timeout);
	}

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				    portid, report) < 0) {
		/* destroy event was not delivered. nf_ct_put will
		 * be done by event cache worker on redelivery.
		 */
		nf_ct_delete_from_lists(ct);
		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
		return false;
	}

	nf_conntrack_ecache_work(nf_ct_net(ct));
	nf_ct_delete_from_lists(ct);
	nf_ct_put(ct);
	return true;
}
EXPORT_SYMBOL_GPL(nf_ct_delete);

static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
		const struct nf_conntrack_tuple *tuple,
		const struct nf_conntrack_zone *zone,
		const struct net *net)
{
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

	/* A conntrack can be recreated with an equal tuple,
	 * so we need to check that the conntrack is confirmed
	 */
	return nf_ct_tuple_equal(tuple, &h->tuple) &&
	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
	       nf_ct_is_confirmed(ct) &&
	       net_eq(net, nf_ct_net(ct));
}

static inline bool
nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
{
	return nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				 &ct2->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
	       nf_ct_tuple_equal(&ct1->tuplehash[IP_CT_DIR_REPLY].tuple,
				 &ct2->tuplehash[IP_CT_DIR_REPLY].tuple) &&
	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_ORIGINAL) &&
	       nf_ct_zone_equal(ct1, nf_ct_zone(ct2), IP_CT_DIR_REPLY) &&
	       net_eq(nf_ct_net(ct1), nf_ct_net(ct2));
}

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
	if (!atomic_inc_not_zero(&ct->ct_general.use))
		return;

	if (nf_ct_should_gc(ct))
		nf_ct_kill(ct);

	nf_ct_put(ct);
}

/*
 * Warning:
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	struct hlist_nulls_node *n;
	unsigned int bucket, hsize;

begin:
	nf_conntrack_get_ht(&ct_hash, &hsize);
	bucket = reciprocal_scale(hash, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
		struct nf_conn *ct;

		ct = nf_ct_tuplehash_to_ctrack(h);
		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_key_equal(h, tuple, zone, net))
			return h;
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(n) != bucket) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	return NULL;
}

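/* The "nulls" terminator of each chain encodes the bucket it belongs
 * to. With SLAB_TYPESAFE_BY_RCU an entry can be freed and reinserted
 * into a different chain while we traverse it, so finishing on a nulls
 * value naming a different bucket means the walk leaked into another
 * chain and must be restarted, as done above.
 */
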
/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
			const struct nf_conntrack_tuple *tuple, u32 hash)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;

	rcu_read_lock();

	h = ____nf_conntrack_find(net, zone, tuple, hash);
	if (h) {
		/* We have a candidate that matches the tuple we're interested
		 * in, try to obtain a reference and re-check tuple
		 */
		ct = nf_ct_tuplehash_to_ctrack(h);
		if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
			if (likely(nf_ct_key_equal(h, tuple, zone, net)))
				goto found;

			/* TYPESAFE_BY_RCU recycled the candidate */
			nf_ct_put(ct);
		}

		h = NULL;
	}
found:
	rcu_read_unlock();

	return h;
}

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
		      const struct nf_conntrack_tuple *tuple)
{
	return __nf_conntrack_find_get(net, zone, tuple,
				       hash_conntrack_raw(tuple, net));
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);

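/* A minimal lookup sketch (illustrative, not from this file), assuming
 * the caller built a tuple and uses the default zone:
 *
 *	struct nf_conntrack_tuple_hash *h;
 *
 *	h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
 *	if (h) {
 *		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 *
 *		... inspect ct ...
 *		nf_ct_put(ct);
 *	}
 *
 * nf_ct_put() drops the reference the lookup took.
 */
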
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
				       unsigned int hash,
				       unsigned int reply_hash)
{
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
			   &nf_conntrack_hash[hash]);
	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
			   &nf_conntrack_hash[reply_hash]);
}

int
nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
	const struct nf_conntrack_zone *zone;
	struct net *net = nf_ct_net(ct);
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int sequence;

	zone = nf_ct_zone(ct);

	local_bh_disable();
	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		hash = hash_conntrack(net,
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		reply_hash = hash_conntrack(net,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* See if there's one in the list already, including reverse */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	smp_wmb();
	/* The caller holds a reference to this object */
	atomic_set(&ct->ct_general.use, 2);
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert);
	local_bh_enable();
	return 0;

out:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);

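/* Unlike __nf_conntrack_confirm() below, this helper takes no skb: it
 * serves entries created outside the packet path (ctnetlink is the
 * in-tree user) and returns -EEXIST instead of attempting clash
 * resolution.
 */
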
static inline void nf_ct_acct_update(struct nf_conn *ct,
				     enum ip_conntrack_info ctinfo,
				     unsigned int len)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;

		atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
		atomic64_add(len, &counter[CTINFO2DIR(ctinfo)].bytes);
	}
}

static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			     const struct nf_conn *loser_ct)
{
	struct nf_conn_acct *acct;

	acct = nf_conn_acct_find(loser_ct);
	if (acct) {
		struct nf_conn_counter *counter = acct->counter;
		unsigned int bytes;

		/* u32 should be fine since we must have seen one packet. */
		bytes = atomic64_read(&counter[CTINFO2DIR(ctinfo)].bytes);
		nf_ct_acct_update(ct, ctinfo, bytes);
	}
}

/* Resolve race on insertion if this protocol allows this. */
static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
			       enum ip_conntrack_info ctinfo,
			       struct nf_conntrack_tuple_hash *h)
{
	/* This is the conntrack entry already in hashes that won race. */
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
	const struct nf_conntrack_l4proto *l4proto;
	enum ip_conntrack_info oldinfo;
	struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->allow_clash &&
	    !nf_ct_is_dying(ct) &&
	    atomic_inc_not_zero(&ct->ct_general.use)) {
		if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
		    nf_ct_match(ct, loser_ct)) {
			nf_ct_acct_merge(ct, ctinfo, loser_ct);
			nf_conntrack_put(&loser_ct->ct_general);
			nf_ct_set(skb, ct, oldinfo);
			return NF_ACCEPT;
		}
		nf_ct_put(ct);
	}
	NF_CT_STAT_INC(net, drop);
	return NF_DROP;
}

/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
	const struct nf_conntrack_zone *zone;
	unsigned int hash, reply_hash;
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conn_tstamp *tstamp;
	struct hlist_nulls_node *n;
	enum ip_conntrack_info ctinfo;
	struct net *net;
	unsigned int sequence;
	int ret = NF_DROP;

	ct = nf_ct_get(skb, &ctinfo);
	net = nf_ct_net(ct);

	/* ipt_REJECT uses nf_conntrack_attach to attach related
	   ICMP/TCP RST packets in other direction.  Actual packet
	   which created connection will be IP_CT_NEW or for an
	   expected connection, IP_CT_RELATED. */
	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
		return NF_ACCEPT;

	zone = nf_ct_zone(ct);
	local_bh_disable();

	do {
		sequence = read_seqcount_begin(&nf_conntrack_generation);
		/* reuse the hash saved before */
		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
		hash = scale_hash(hash);
		reply_hash = hash_conntrack(net,
					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

	/* We're not in hash table, and we refuse to set up related
	 * connections for unconfirmed conns.  But packet copies and
	 * REJECT will give spurious warnings here.
	 */

	/* Another skb with the same unconfirmed conntrack may
	 * win the race. This may happen for bridge (br_flood) or
	 * broadcast/multicast packets, which do skb_clone() with an
	 * unconfirmed conntrack.
	 */
	if (unlikely(nf_ct_is_confirmed(ct))) {
		WARN_ON_ONCE(1);
		nf_conntrack_double_unlock(hash, reply_hash);
		local_bh_enable();
		return NF_DROP;
	}

	pr_debug("Confirming conntrack %p\n", ct);
	/* We have to check the DYING flag after unlink to prevent
	 * a race against nf_ct_get_next_corpse() possibly called from
	 * user context, else we insert an already 'dead' hash, blocking
	 * further use of that particular connection -JM.
	 */
	nf_ct_del_from_dying_or_unconfirmed_list(ct);

	if (unlikely(nf_ct_is_dying(ct))) {
		nf_ct_add_to_dying_list(ct);
		goto dying;
	}

	/* See if there's one in the list already, including reverse:
	   NAT could have grabbed it without realizing, since we're
	   not in the hash.  If there is, we lost race. */
	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				    zone, net))
			goto out;

	hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
		if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
				    zone, net))
			goto out;

	/* Timer relative to confirmation time, not original
	   setting time, otherwise we'd get timer wrap in
	   weird delay cases. */
	ct->timeout += nfct_time_stamp;
	atomic_inc(&ct->ct_general.use);
	ct->status |= IPS_CONFIRMED;

	/* set conntrack timestamp, if enabled. */
	tstamp = nf_conn_tstamp_find(ct);
	if (tstamp)
		tstamp->start = ktime_get_real_ns();

	/* Since the lookup is lockless, hash insertion must be done after
	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
	 * guarantee that no other CPU can find the conntrack before the above
	 * stores are visible.
	 */
	__nf_conntrack_hash_insert(ct, hash, reply_hash);
	nf_conntrack_double_unlock(hash, reply_hash);
	local_bh_enable();

	help = nfct_help(ct);
	if (help && help->helper)
		nf_conntrack_event_cache(IPCT_HELPER, ct);

	nf_conntrack_event_cache(master_ct(ct) ?
				 IPCT_RELATED : IPCT_NEW, ct);
	return NF_ACCEPT;

out:
	nf_ct_add_to_dying_list(ct);
	ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
dying:
	nf_conntrack_double_unlock(hash, reply_hash);
	NF_CT_STAT_INC(net, insert_failed);
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);

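/* Confirmation runs late, from the inline nf_conntrack_confirm()
 * wrapper invoked at the POSTROUTING/LOCAL_IN hooks, so NAT has
 * already rewritten the reply tuple by the time the entry becomes
 * visible to other CPUs.
 */
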
/* Returns true if a connection corresponds to the tuple (required
   for NAT). */
int
nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			 const struct nf_conn *ignored_conntrack)
{
	struct net *net = nf_ct_net(ignored_conntrack);
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_head *ct_hash;
	unsigned int hash, hsize;
	struct hlist_nulls_node *n;
	struct nf_conn *ct;

	zone = nf_ct_zone(ignored_conntrack);

	rcu_read_lock();
 begin:
	nf_conntrack_get_ht(&ct_hash, &hsize);
	hash = __hash_conntrack(net, tuple, hsize);

	hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
		ct = nf_ct_tuplehash_to_ctrack(h);

		if (ct == ignored_conntrack)
			continue;

		if (nf_ct_is_expired(ct)) {
			nf_ct_gc_expired(ct);
			continue;
		}

		if (nf_ct_key_equal(h, tuple, zone, net)) {
			/* Tuple is taken already, so caller will need to find
			 * a new source port to use.
			 *
			 * Only exception:
			 * If the *original tuples* are identical, then both
			 * conntracks refer to the same flow.
			 * This is a rare situation, it can occur e.g. when
			 * more than one UDP packet is sent from same socket
			 * in different threads.
			 *
			 * Let nf_ct_resolve_clash() deal with this later.
			 */
			if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
					      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
					      nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
				continue;

			NF_CT_STAT_INC_ATOMIC(net, found);
			rcu_read_unlock();
			return 1;
		}
	}

	if (get_nulls_value(n) != hash) {
		NF_CT_STAT_INC_ATOMIC(net, search_restart);
		goto begin;
	}

	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);

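/* Illustrative use (a sketch, not the exact NAT code): source-port
 * allocation probes candidate tuples until it finds a free one:
 *
 *	for (port = low; port <= high; port++) {
 *		tuple.src.u.udp.port = htons(port);
 *		if (!nf_conntrack_tuple_taken(&tuple, ct))
 *			break;
 *	}
 *
 * where tuple and ct come from the NAT setup path.
 */
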
#define NF_CT_EVICTION_RANGE	8

/* There's a small race here where we may free a just-assured
   connection.  Too bad: we're in trouble anyway. */
static unsigned int early_drop_list(struct net *net,
				    struct hlist_nulls_head *head)
{
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	unsigned int drops = 0;
	struct nf_conn *tmp;

	hlist_nulls_for_each_entry_rcu(h, n, head, hnnode) {
		tmp = nf_ct_tuplehash_to_ctrack(h);

		if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
			continue;

		if (nf_ct_is_expired(tmp)) {
			nf_ct_gc_expired(tmp);
			continue;
		}

		if (test_bit(IPS_ASSURED_BIT, &tmp->status) ||
		    !net_eq(nf_ct_net(tmp), net) ||
		    nf_ct_is_dying(tmp))
			continue;

		if (!atomic_inc_not_zero(&tmp->ct_general.use))
			continue;

		/* kill only if still in same netns -- might have moved due to
		 * SLAB_TYPESAFE_BY_RCU rules.
		 *
		 * We steal the timer reference.  If that fails, the timer has
		 * already fired or someone else deleted it. Just drop the ref
		 * and move to the next entry.
		 */
		if (net_eq(nf_ct_net(tmp), net) &&
		    nf_ct_is_confirmed(tmp) &&
		    nf_ct_delete(tmp, 0, 0))
			drops++;

		nf_ct_put(tmp);
	}

	return drops;
}

static noinline int early_drop(struct net *net, unsigned int hash)
{
	unsigned int i, bucket;

	for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
		struct hlist_nulls_head *ct_hash;
		unsigned int hsize, drops;

		rcu_read_lock();
		nf_conntrack_get_ht(&ct_hash, &hsize);
		if (!i)
			bucket = reciprocal_scale(hash, hsize);
		else
			bucket = (bucket + 1) % hsize;

		drops = early_drop_list(net, &ct_hash[bucket]);
		rcu_read_unlock();

		if (drops) {
			NF_CT_STAT_ADD_ATOMIC(net, early_drop, drops);
			return true;
		}
	}

	return false;
}

static bool gc_worker_skip_ct(const struct nf_conn *ct)
{
	return !nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct);
}

static bool gc_worker_can_early_drop(const struct nf_conn *ct)
{
	const struct nf_conntrack_l4proto *l4proto;

	if (!test_bit(IPS_ASSURED_BIT, &ct->status))
		return true;

	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
	if (l4proto->can_early_drop && l4proto->can_early_drop(ct))
		return true;

	return false;
}

#define	DAY	(86400 * HZ)

/* Set an arbitrary timeout large enough not to ever expire, this saves
 * us a check for the IPS_OFFLOAD_BIT from the packet path via
 * nf_ct_is_expired().
 */
static void nf_ct_offload_timeout(struct nf_conn *ct)
{
	if (nf_ct_expires(ct) < DAY / 2)
		ct->timeout = nfct_time_stamp + DAY;
}

static void gc_worker(struct work_struct *work)
{
	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
	unsigned int i, hashsz, nf_conntrack_max95 = 0;
	unsigned long next_run = GC_SCAN_INTERVAL;
	struct conntrack_gc_work *gc_work;
	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);

	i = gc_work->next_bucket;
	if (gc_work->early_drop)
		nf_conntrack_max95 = nf_conntrack_max / 100u * 95u;

	do {
		struct nf_conntrack_tuple_hash *h;
		struct hlist_nulls_head *ct_hash;
		struct hlist_nulls_node *n;
		struct nf_conn *tmp;

		rcu_read_lock();

		nf_conntrack_get_ht(&ct_hash, &hashsz);
		if (i >= hashsz) {
			rcu_read_unlock();
			break;
		}

		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
			struct net *net;

			tmp = nf_ct_tuplehash_to_ctrack(h);

			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
				nf_ct_offload_timeout(tmp);
				continue;
			}

			if (nf_ct_is_expired(tmp)) {
				nf_ct_gc_expired(tmp);
				continue;
			}

			if (nf_conntrack_max95 == 0 || gc_worker_skip_ct(tmp))
				continue;

			net = nf_ct_net(tmp);
			if (atomic_read(&net->ct.count) < nf_conntrack_max95)
				continue;

			/* need to take reference to avoid possible races */
			if (!atomic_inc_not_zero(&tmp->ct_general.use))
				continue;

			if (gc_worker_skip_ct(tmp)) {
				nf_ct_put(tmp);
				continue;
			}

			if (gc_worker_can_early_drop(tmp))
				nf_ct_kill(tmp);

			nf_ct_put(tmp);
		}

		/* could check get_nulls_value() here and restart if ct
		 * was moved to another chain.  But given gc is best-effort
		 * we will just continue with next hash slot.
		 */
		rcu_read_unlock();
		cond_resched();
		i++;

		if (time_after(jiffies, end_time) && i < hashsz) {
			gc_work->next_bucket = i;
			next_run = 0;
			break;
		}
	} while (i < hashsz);

	if (gc_work->exiting)
		return;

	/*
	 * Eviction will normally happen from the packet path, and not
	 * from this gc worker.
	 *
	 * This worker is only here to reap expired entries when the
	 * system went idle after a busy period.
	 */
	if (next_run) {
		gc_work->early_drop = false;
		gc_work->next_bucket = 0;
	}
	queue_delayed_work(system_power_efficient_wq, &gc_work->dwork, next_run);
}

static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
	INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker);
	gc_work->exiting = false;
}

static struct nf_conn *
__nf_conntrack_alloc(struct net *net,
		     const struct nf_conntrack_zone *zone,
		     const struct nf_conntrack_tuple *orig,
		     const struct nf_conntrack_tuple *repl,
		     gfp_t gfp, u32 hash)
{
	struct nf_conn *ct;

	/* We don't want any race condition at early drop stage */
	atomic_inc(&net->ct.count);

	if (nf_conntrack_max &&
	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
		if (!early_drop(net, hash)) {
			if (!conntrack_gc_work.early_drop)
				conntrack_gc_work.early_drop = true;
			atomic_dec(&net->ct.count);
			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	/*
	 * Do not use kmem_cache_zalloc(), as this cache uses
	 * SLAB_TYPESAFE_BY_RCU.
	 */
	ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
	if (ct == NULL)
		goto out;

	spin_lock_init(&ct->lock);
	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
	/* save hash for reusing when confirming */
	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
	ct->status = 0;
	ct->timeout = 0;
	write_pnet(&ct->ct_net, net);
	memset(&ct->__nfct_init_offset, 0,
	       offsetof(struct nf_conn, proto) -
	       offsetof(struct nf_conn, __nfct_init_offset));

	nf_ct_zone_add(ct, zone);

	trace_android_rvh_nf_conn_alloc(ct);

	/* Because we use RCU lookups, we set ct_general.use to zero before
	 * this is inserted in any list.
	 */
	atomic_set(&ct->ct_general.use, 0);
	return ct;
out:
	atomic_dec(&net->ct.count);
	return ERR_PTR(-ENOMEM);
}

struct nf_conn *nf_conntrack_alloc(struct net *net,
				   const struct nf_conntrack_zone *zone,
				   const struct nf_conntrack_tuple *orig,
				   const struct nf_conntrack_tuple *repl,
				   gfp_t gfp)
{
	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);

void nf_conntrack_free(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);

	/* A freed object has refcnt == 0, that's
	 * the golden rule for SLAB_TYPESAFE_BY_RCU
	 */
	WARN_ON(atomic_read(&ct->ct_general.use) != 0);

	nf_ct_ext_destroy(ct);
	nf_ct_ext_free(ct);
	kmem_cache_free(nf_conntrack_cachep, ct);
	smp_mb__before_atomic();
	trace_android_rvh_nf_conn_free(ct);
	atomic_dec(&net->ct.count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);


/* Allocate a new conntrack: we return -ENOMEM if classification
   failed due to stress.  Otherwise it really is unclassifiable. */
static noinline struct nf_conntrack_tuple_hash *
init_conntrack(struct net *net, struct nf_conn *tmpl,
	       const struct nf_conntrack_tuple *tuple,
	       struct sk_buff *skb,
	       unsigned int dataoff, u32 hash)
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	struct nf_conntrack_tuple repl_tuple;
	struct nf_conntrack_ecache *ecache;
	struct nf_conntrack_expect *exp = NULL;
	const struct nf_conntrack_zone *zone;
	struct nf_conn_timeout *timeout_ext;
	struct nf_conntrack_zone tmp;

	if (!nf_ct_invert_tuple(&repl_tuple, tuple)) {
		pr_debug("Can't invert tuple.\n");
		return NULL;
	}

	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
				  hash);
	if (IS_ERR(ct))
		return (struct nf_conntrack_tuple_hash *)ct;

	if (!nf_ct_add_synproxy(ct, tmpl)) {
		nf_conntrack_free(ct);
		return ERR_PTR(-ENOMEM);
	}

	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;

	if (timeout_ext)
		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
				      GFP_ATOMIC);

	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
	nf_ct_labels_ext_add(ct);

	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
				 ecache ? ecache->expmask : 0,
			     GFP_ATOMIC);

	local_bh_disable();
	if (net->ct.expect_count) {
		spin_lock(&nf_conntrack_expect_lock);
		exp = nf_ct_find_expectation(net, zone, tuple);
		if (exp) {
			pr_debug("expectation arrives ct=%p exp=%p\n",
				 ct, exp);
			/* Welcome, Mr. Bond.  We've been expecting you... */
			__set_bit(IPS_EXPECTED_BIT, &ct->status);
			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
			ct->master = exp->master;
			if (exp->helper) {
				help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
				if (help)
					rcu_assign_pointer(help->helper, exp->helper);
			}

#ifdef CONFIG_NF_CONNTRACK_MARK
			ct->mark = exp->master->mark;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
			ct->secmark = exp->master->secmark;
#endif
			NF_CT_STAT_INC(net, expect_new);
		}
		spin_unlock(&nf_conntrack_expect_lock);
	}
	if (!exp)
		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);

	/* Now it is inserted into the unconfirmed list, bump refcount */
	nf_conntrack_get(&ct->ct_general);
	nf_ct_add_to_unconfirmed_list(ct);

	local_bh_enable();

	if (exp) {
		if (exp->expectfn)
			exp->expectfn(ct, exp);
		nf_ct_expect_put(exp);
	}

	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
}

/* On success, returns 0, sets skb->_nfct | ctinfo */
static int
resolve_normal_ct(struct nf_conn *tmpl,
		  struct sk_buff *skb,
		  unsigned int dataoff,
		  u_int8_t protonum,
		  const struct nf_hook_state *state)
{
	const struct nf_conntrack_zone *zone;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_tuple_hash *h;
	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_zone tmp;
	struct nf_conn *ct;
	u32 hash;

	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
			     dataoff, state->pf, protonum, state->net,
			     &tuple)) {
		pr_debug("Can't get tuple\n");
		return 0;
	}

	/* look for tuple match */
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
	hash = hash_conntrack_raw(&tuple, state->net);
	h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
	if (!h) {
		h = init_conntrack(state->net, tmpl, &tuple,
				   skb, dataoff, hash);
		if (!h)
			return 0;
		if (IS_ERR(h))
			return PTR_ERR(h);
	}
	ct = nf_ct_tuplehash_to_ctrack(h);

	/* It exists; we have (non-exclusive) reference. */
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
		ctinfo = IP_CT_ESTABLISHED_REPLY;
	} else {
		/* Once we've had two way comms, always ESTABLISHED. */
		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
			pr_debug("normal packet for %p\n", ct);
			ctinfo = IP_CT_ESTABLISHED;
		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
			pr_debug("related packet for %p\n", ct);
			ctinfo = IP_CT_RELATED;
		} else {
			pr_debug("new packet for %p\n", ct);
			ctinfo = IP_CT_NEW;
		}
	}
	nf_ct_set(skb, ct, ctinfo);
	return 0;
}

/*
 * ICMP packets need special treatment to handle error messages that
 * are related to a connection.
 *
 * Callers need to check whether skb has a conntrack assigned when this
 * helper returns; in that case the skb belongs to an already known
 * connection.
 */
static unsigned int __cold
nf_conntrack_handle_icmp(struct nf_conn *tmpl,
			 struct sk_buff *skb,
			 unsigned int dataoff,
			 u8 protonum,
			 const struct nf_hook_state *state)
{
	int ret;

	if (state->pf == NFPROTO_IPV4 && protonum == IPPROTO_ICMP)
		ret = nf_conntrack_icmpv4_error(tmpl, skb, dataoff, state);
#if IS_ENABLED(CONFIG_IPV6)
	else if (state->pf == NFPROTO_IPV6 && protonum == IPPROTO_ICMPV6)
		ret = nf_conntrack_icmpv6_error(tmpl, skb, dataoff, state);
#endif
	else
		return NF_ACCEPT;

	if (ret <= 0) {
		NF_CT_STAT_INC_ATOMIC(state->net, error);
		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
	}

	return ret;
}

static int generic_packet(struct nf_conn *ct, struct sk_buff *skb,
			  enum ip_conntrack_info ctinfo)
{
	const unsigned int *timeout = nf_ct_timeout_lookup(ct);

	if (!timeout)
		timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;

	nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
	return NF_ACCEPT;
}

/* Returns verdict for packet, or -1 for invalid. */
static int nf_conntrack_handle_packet(struct nf_conn *ct,
				      struct sk_buff *skb,
				      unsigned int dataoff,
				      enum ip_conntrack_info ctinfo,
				      const struct nf_hook_state *state)
{
	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		return nf_conntrack_tcp_packet(ct, skb, dataoff,
					       ctinfo, state);
	case IPPROTO_UDP:
		return nf_conntrack_udp_packet(ct, skb, dataoff,
					       ctinfo, state);
	case IPPROTO_ICMP:
		return nf_conntrack_icmp_packet(ct, skb, ctinfo, state);
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		return nf_conntrack_icmpv6_packet(ct, skb, ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	case IPPROTO_UDPLITE:
		return nf_conntrack_udplite_packet(ct, skb, dataoff,
						   ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
	case IPPROTO_SCTP:
		return nf_conntrack_sctp_packet(ct, skb, dataoff,
						ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
	case IPPROTO_DCCP:
		return nf_conntrack_dccp_packet(ct, skb, dataoff,
						ctinfo, state);
#endif
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		return nf_conntrack_gre_packet(ct, skb, dataoff,
					       ctinfo, state);
#endif
	}

	return generic_packet(ct, skb, ctinfo);
}
1650 
1651 unsigned int
nf_conntrack_in(struct sk_buff * skb,const struct nf_hook_state * state)1652 nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
1653 {
1654 	enum ip_conntrack_info ctinfo;
1655 	struct nf_conn *ct, *tmpl;
1656 	u_int8_t protonum;
1657 	int dataoff, ret;
1658 
1659 	tmpl = nf_ct_get(skb, &ctinfo);
1660 	if (tmpl || ctinfo == IP_CT_UNTRACKED) {
1661 		/* Previously seen (loopback or untracked)?  Ignore. */
1662 		if ((tmpl && !nf_ct_is_template(tmpl)) ||
1663 		     ctinfo == IP_CT_UNTRACKED) {
1664 			NF_CT_STAT_INC_ATOMIC(state->net, ignore);
1665 			return NF_ACCEPT;
1666 		}
1667 		skb->_nfct = 0;
1668 	}
1669 
1670 	/* rcu_read_lock()ed by nf_hook_thresh */
1671 	dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
1672 	if (dataoff <= 0) {
1673 		pr_debug("not prepared to track yet or error occurred\n");
1674 		NF_CT_STAT_INC_ATOMIC(state->net, error);
1675 		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1676 		ret = NF_ACCEPT;
1677 		goto out;
1678 	}
1679 
1680 	if (protonum == IPPROTO_ICMP || protonum == IPPROTO_ICMPV6) {
1681 		ret = nf_conntrack_handle_icmp(tmpl, skb, dataoff,
1682 					       protonum, state);
1683 		if (ret <= 0) {
1684 			ret = -ret;
1685 			goto out;
1686 		}
1687 		/* ICMP[v6] protocol trackers may assign one conntrack. */
1688 		if (skb->_nfct)
1689 			goto out;
1690 	}
1691 repeat:
1692 	ret = resolve_normal_ct(tmpl, skb, dataoff,
1693 				protonum, state);
1694 	if (ret < 0) {
1695 		/* Too stressed to deal. */
1696 		NF_CT_STAT_INC_ATOMIC(state->net, drop);
1697 		ret = NF_DROP;
1698 		goto out;
1699 	}
1700 
1701 	ct = nf_ct_get(skb, &ctinfo);
1702 	if (!ct) {
1703 		/* Not valid part of a connection */
1704 		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1705 		ret = NF_ACCEPT;
1706 		goto out;
1707 	}
1708 
1709 	ret = nf_conntrack_handle_packet(ct, skb, dataoff, ctinfo, state);
1710 	if (ret <= 0) {
1711 		/* Invalid: inverse of the return code tells
1712 		 * the netfilter core what to do */
1713 		pr_debug("nf_conntrack_in: Can't track with proto module\n");
1714 		nf_conntrack_put(&ct->ct_general);
1715 		skb->_nfct = 0;
1716 		/* Special case: TCP tracker reports an attempt to reopen a
1717 		 * closed/aborted connection. We have to go back and create a
1718 		 * fresh conntrack.
1719 		 */
1720 		if (ret == -NF_REPEAT)
1721 			goto repeat;
1722 
1723 		NF_CT_STAT_INC_ATOMIC(state->net, invalid);
1724 		if (ret == -NF_DROP)
1725 			NF_CT_STAT_INC_ATOMIC(state->net, drop);
1726 
1727 		ret = -ret;
1728 		goto out;
1729 	}
1730 
1731 	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
1732 	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1733 		nf_conntrack_event_cache(IPCT_REPLY, ct);
1734 out:
1735 	if (tmpl)
1736 		nf_ct_put(tmpl);
1737 
1738 	return ret;
1739 }
1740 EXPORT_SYMBOL_GPL(nf_conntrack_in);
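
/* nf_conntrack_in() is not called directly by most users; it is wired
 * up as a netfilter hook.  A minimal sketch of such a wrapper (the
 * in-tree ones live in nf_conntrack_proto.c):
 *
 *	static unsigned int ipv4_conntrack_in(void *priv,
 *					      struct sk_buff *skb,
 *					      const struct nf_hook_state *state)
 *	{
 *		return nf_conntrack_in(skb, state);
 *	}
 *
 * hooked at NF_INET_PRE_ROUTING and NF_INET_LOCAL_OUT with priority
 * NF_IP_PRI_CONNTRACK, so tracking runs before NAT and filtering.
 */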
1741 
1742 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1743    implicitly racy: see __nf_conntrack_confirm */
1744 void nf_conntrack_alter_reply(struct nf_conn *ct,
1745 			      const struct nf_conntrack_tuple *newreply)
1746 {
1747 	struct nf_conn_help *help = nfct_help(ct);
1748 
1749 	/* Should be unconfirmed, so not in hash table yet */
1750 	WARN_ON(nf_ct_is_confirmed(ct));
1751 
1752 	pr_debug("Altering reply tuple of %p to ", ct);
1753 	nf_ct_dump_tuple(newreply);
1754 
1755 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1756 	if (ct->master || (help && !hlist_empty(&help->expectations)))
1757 		return;
1758 
1759 	rcu_read_lock();
1760 	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1761 	rcu_read_unlock();
1762 }
1763 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1764 
1765 /* Refresh conntrack for this many jiffies and do accounting if do_acct is set */
1766 void __nf_ct_refresh_acct(struct nf_conn *ct,
1767 			  enum ip_conntrack_info ctinfo,
1768 			  const struct sk_buff *skb,
1769 			  u32 extra_jiffies,
1770 			  bool do_acct)
1771 {
1772 	/* Only update if this is not a fixed timeout */
1773 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1774 		goto acct;
1775 
1776 	/* If not in hash table, timer will not be active yet */
1777 	if (nf_ct_is_confirmed(ct))
1778 		extra_jiffies += nfct_time_stamp;
1779 
1780 	if (READ_ONCE(ct->timeout) != extra_jiffies)
1781 		WRITE_ONCE(ct->timeout, extra_jiffies);
1782 acct:
1783 	if (do_acct)
1784 		nf_ct_acct_update(ct, ctinfo, skb->len);
1785 }
1786 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
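
/* Callers normally use the inline wrappers from nf_conntrack.h instead
 * of this function; roughly:
 *
 *	nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies)
 *		-> __nf_ct_refresh_acct(..., true);	// with accounting
 *	nf_ct_refresh(ct, skb, extra_jiffies)
 *		-> __nf_ct_refresh_acct(..., false);	// timeout only
 *
 * For an unconfirmed conntrack the timeout is stored as a relative
 * value; it only becomes an absolute deadline (nfct_time_stamp +
 * extra_jiffies) once the entry is confirmed, as seen above.
 */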
1787 
1788 bool nf_ct_kill_acct(struct nf_conn *ct,
1789 		     enum ip_conntrack_info ctinfo,
1790 		     const struct sk_buff *skb)
1791 {
1792 	nf_ct_acct_update(ct, ctinfo, skb->len);
1793 
1794 	return nf_ct_delete(ct, 0, 0);
1795 }
1796 EXPORT_SYMBOL_GPL(nf_ct_kill_acct);
1797 
1798 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1799 
1800 #include <linux/netfilter/nfnetlink.h>
1801 #include <linux/netfilter/nfnetlink_conntrack.h>
1802 #include <linux/mutex.h>
1803 
1804 /* Generic function for tcp/udp/sctp/dccp and the like. */
1805 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1806 			       const struct nf_conntrack_tuple *tuple)
1807 {
1808 	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1809 	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1810 		goto nla_put_failure;
1811 	return 0;
1812 
1813 nla_put_failure:
1814 	return -1;
1815 }
1816 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1817 
1818 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1819 	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1820 	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1821 };
1822 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1823 
1824 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1825 			       struct nf_conntrack_tuple *t)
1826 {
1827 	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1828 		return -EINVAL;
1829 
1830 	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1831 	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1832 
1833 	return 0;
1834 }
1835 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1836 
1837 unsigned int nf_ct_port_nlattr_tuple_size(void)
1838 {
1839 	static unsigned int size __read_mostly;
1840 
1841 	if (!size)
1842 		size = nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1843 
1844 	return size;
1845 }
1846 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1847 #endif
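
/* Illustrative round trip through the port helpers above, as a tuple
 * is dumped to and re-parsed from a netlink message:
 *
 *	struct nlattr *tb[CTA_PROTO_MAX + 1];
 *	struct nf_conntrack_tuple t = {};
 *
 *	// dump: writes CTA_PROTO_SRC_PORT / CTA_PROTO_DST_PORT
 *	if (nf_ct_port_tuple_to_nlattr(skb, tuple) < 0)
 *		goto nla_put_failure;
 *
 *	// parse: both attributes must be present, else -EINVAL
 *	if (nf_ct_port_nlattr_to_tuple(tb, &t) < 0)
 *		return -EINVAL;
 *
 * The union layout makes tuple->src.u.tcp.port alias the udp/sctp/dccp
 * port fields, which is why one helper serves all port-based protocols.
 */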
1848 
1849 /* Used by ipt_REJECT and ip6t_REJECT. */
1850 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1851 {
1852 	struct nf_conn *ct;
1853 	enum ip_conntrack_info ctinfo;
1854 
1855 	/* This ICMP is in the reverse direction to the packet which caused it */
1856 	ct = nf_ct_get(skb, &ctinfo);
1857 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1858 		ctinfo = IP_CT_RELATED_REPLY;
1859 	else
1860 		ctinfo = IP_CT_RELATED;
1861 
1862 	/* Attach to new skbuff, and increment count */
1863 	nf_ct_set(nskb, ct, ctinfo);
1864 	nf_conntrack_get(skb_nfct(nskb));
1865 }
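
/* This is published through the ip_ct_attach pointer (see
 * nf_conntrack_init_end() below) and reached via nf_ct_attach() in
 * net/netfilter/core.c, roughly:
 *
 *	attach = rcu_dereference(ip_ct_attach);
 *	if (attach)
 *		attach(new_skb, skb);
 *
 * so a generated ICMP error inherits the conntrack of the packet that
 * triggered it, in one of the RELATED states set above.
 */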
1866 
1867 static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
1868 				 struct nf_conn *ct,
1869 				 enum ip_conntrack_info ctinfo)
1870 {
1871 	struct nf_conntrack_tuple_hash *h;
1872 	struct nf_conntrack_tuple tuple;
1873 	struct nf_nat_hook *nat_hook;
1874 	unsigned int status;
1875 	int dataoff;
1876 	u16 l3num;
1877 	u8 l4num;
1878 
1879 	l3num = nf_ct_l3num(ct);
1880 
1881 	dataoff = get_l4proto(skb, skb_network_offset(skb), l3num, &l4num);
1882 	if (dataoff <= 0)
1883 		return -1;
1884 
1885 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb), dataoff, l3num,
1886 			     l4num, net, &tuple))
1887 		return -1;
1888 
1889 	if (ct->status & IPS_SRC_NAT) {
1890 		memcpy(tuple.src.u3.all,
1891 		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.all,
1892 		       sizeof(tuple.src.u3.all));
1893 		tuple.src.u.all =
1894 			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all;
1895 	}
1896 
1897 	if (ct->status & IPS_DST_NAT) {
1898 		memcpy(tuple.dst.u3.all,
1899 		       ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.all,
1900 		       sizeof(tuple.dst.u3.all));
1901 		tuple.dst.u.all =
1902 			ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all;
1903 	}
1904 
1905 	h = nf_conntrack_find_get(net, nf_ct_zone(ct), &tuple);
1906 	if (!h)
1907 		return 0;
1908 
1909 	/* Store status bits of the clashing conntrack so that NAT mangling
1910 	 * can be redone according to what was already done to this packet.
1911 	 */
1912 	status = ct->status;
1913 
1914 	nf_ct_put(ct);
1915 	ct = nf_ct_tuplehash_to_ctrack(h);
1916 	nf_ct_set(skb, ct, ctinfo);
1917 
1918 	nat_hook = rcu_dereference(nf_nat_hook);
1919 	if (!nat_hook)
1920 		return 0;
1921 
1922 	if (status & IPS_SRC_NAT &&
1923 	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_SRC,
1924 				IP_CT_DIR_ORIGINAL) == NF_DROP)
1925 		return -1;
1926 
1927 	if (status & IPS_DST_NAT &&
1928 	    nat_hook->manip_pkt(skb, ct, NF_NAT_MANIP_DST,
1929 				IP_CT_DIR_ORIGINAL) == NF_DROP)
1930 		return -1;
1931 
1932 	return 0;
1933 }
1934 
1935 /* This packet is coming from userspace via nf_queue; complete the packet
1936  * processing after the helper invocation in nf_confirm().
1937  */
1938 static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
1939 			       enum ip_conntrack_info ctinfo)
1940 {
1941 	const struct nf_conntrack_helper *helper;
1942 	const struct nf_conn_help *help;
1943 	int protoff;
1944 
1945 	help = nfct_help(ct);
1946 	if (!help)
1947 		return 0;
1948 
1949 	helper = rcu_dereference(help->helper);
1950 	if (!helper)
1951 		return 0;
1952 
1953 	if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
1954 		return 0;
1955 
1956 	switch (nf_ct_l3num(ct)) {
1957 	case NFPROTO_IPV4:
1958 		protoff = skb_network_offset(skb) + ip_hdrlen(skb);
1959 		break;
1960 #if IS_ENABLED(CONFIG_IPV6)
1961 	case NFPROTO_IPV6: {
1962 		__be16 frag_off;
1963 		u8 pnum;
1964 
1965 		pnum = ipv6_hdr(skb)->nexthdr;
1966 		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum,
1967 					   &frag_off);
1968 		if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
1969 			return 0;
1970 		break;
1971 	}
1972 #endif
1973 	default:
1974 		return 0;
1975 	}
1976 
1977 	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
1978 	    !nf_is_loopback_packet(skb)) {
1979 		if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
1980 			NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
1981 			return -1;
1982 		}
1983 	}
1984 
1985 	/* We've seen it coming out the other side: confirm it */
1986 	return nf_conntrack_confirm(skb) == NF_DROP ? -1 : 0;
1987 }
1988 
1989 static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
1990 {
1991 	enum ip_conntrack_info ctinfo;
1992 	struct nf_conn *ct;
1993 	int err;
1994 
1995 	ct = nf_ct_get(skb, &ctinfo);
1996 	if (!ct)
1997 		return 0;
1998 
1999 	if (!nf_ct_is_confirmed(ct)) {
2000 		err = __nf_conntrack_update(net, skb, ct, ctinfo);
2001 		if (err < 0)
2002 			return err;
2003 
2004 		ct = nf_ct_get(skb, &ctinfo);
2005 	}
2006 
2007 	return nf_confirm_cthelper(skb, ct, ctinfo);
2008 }
2009 
2010 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
2011 				       const struct sk_buff *skb)
2012 {
2013 	const struct nf_conntrack_tuple *src_tuple;
2014 	const struct nf_conntrack_tuple_hash *hash;
2015 	struct nf_conntrack_tuple srctuple;
2016 	enum ip_conntrack_info ctinfo;
2017 	struct nf_conn *ct;
2018 
2019 	ct = nf_ct_get(skb, &ctinfo);
2020 	if (ct) {
2021 		src_tuple = nf_ct_tuple(ct, CTINFO2DIR(ctinfo));
2022 		memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
2023 		return true;
2024 	}
2025 
2026 	if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
2027 			       NFPROTO_IPV4, dev_net(skb->dev),
2028 			       &srctuple))
2029 		return false;
2030 
2031 	hash = nf_conntrack_find_get(dev_net(skb->dev),
2032 				     &nf_ct_zone_dflt,
2033 				     &srctuple);
2034 	if (!hash)
2035 		return false;
2036 
2037 	ct = nf_ct_tuplehash_to_ctrack(hash);
2038 	src_tuple = nf_ct_tuple(ct, !hash->tuple.dst.dir);
2039 	memcpy(dst_tuple, src_tuple, sizeof(*dst_tuple));
2040 	nf_ct_put(ct);
2041 
2042 	return true;
2043 }
2044 
2045 /* Bring out ya dead! */
2046 static struct nf_conn *
2047 get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
2048 		void *data, unsigned int *bucket)
2049 {
2050 	struct nf_conntrack_tuple_hash *h;
2051 	struct nf_conn *ct;
2052 	struct hlist_nulls_node *n;
2053 	spinlock_t *lockp;
2054 
2055 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
2056 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
2057 		local_bh_disable();
2058 		nf_conntrack_lock(lockp);
2059 		if (*bucket < nf_conntrack_htable_size) {
2060 			hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
2061 				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
2062 					continue;
2063 				ct = nf_ct_tuplehash_to_ctrack(h);
2064 				if (iter(ct, data))
2065 					goto found;
2066 			}
2067 		}
2068 		spin_unlock(lockp);
2069 		local_bh_enable();
2070 		cond_resched();
2071 	}
2072 
2073 	return NULL;
2074 found:
2075 	atomic_inc(&ct->ct_general.use);
2076 	spin_unlock(lockp);
2077 	local_bh_enable();
2078 	return ct;
2079 }
2080 
2081 static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
2082 				  void *data, u32 portid, int report)
2083 {
2084 	unsigned int bucket = 0, sequence;
2085 	struct nf_conn *ct;
2086 
2087 	might_sleep();
2088 
2089 	for (;;) {
2090 		sequence = read_seqcount_begin(&nf_conntrack_generation);
2091 
2092 		while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
2093 			/* Time to push up daisies... */
2094 
2095 			nf_ct_delete(ct, portid, report);
2096 			nf_ct_put(ct);
2097 			cond_resched();
2098 		}
2099 
2100 		if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
2101 			break;
2102 		bucket = 0;
2103 	}
2104 }
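
/* An iterator callback returns nonzero to have the conntrack deleted
 * and zero to keep it; kill_all() and iter_net_only() below are the
 * in-tree examples.  An illustrative sketch (hypothetical helper;
 * ct->mark needs CONFIG_NF_CONNTRACK_MARK):
 *
 *	static int kill_by_mark(struct nf_conn *ct, void *data)
 *	{
 *		const u32 *mark = data;
 *
 *		return ct->mark == *mark;	// nonzero => delete
 *	}
 */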
2105 
2106 struct iter_data {
2107 	int (*iter)(struct nf_conn *i, void *data);
2108 	void *data;
2109 	struct net *net;
2110 };
2111 
2112 static int iter_net_only(struct nf_conn *i, void *data)
2113 {
2114 	struct iter_data *d = data;
2115 
2116 	if (!net_eq(d->net, nf_ct_net(i)))
2117 		return 0;
2118 
2119 	return d->iter(i, d->data);
2120 }
2121 
2122 static void
2123 __nf_ct_unconfirmed_destroy(struct net *net)
2124 {
2125 	int cpu;
2126 
2127 	for_each_possible_cpu(cpu) {
2128 		struct nf_conntrack_tuple_hash *h;
2129 		struct hlist_nulls_node *n;
2130 		struct ct_pcpu *pcpu;
2131 
2132 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2133 
2134 		spin_lock_bh(&pcpu->lock);
2135 		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
2136 			struct nf_conn *ct;
2137 
2138 			ct = nf_ct_tuplehash_to_ctrack(h);
2139 
2140 			/* we cannot call iter() on the unconfirmed list; the
2141 			 * owning cpu can reallocate ct->ext at any time.
2142 			 */
2143 			set_bit(IPS_DYING_BIT, &ct->status);
2144 		}
2145 		spin_unlock_bh(&pcpu->lock);
2146 		cond_resched();
2147 	}
2148 }
2149 
2150 void nf_ct_unconfirmed_destroy(struct net *net)
2151 {
2152 	might_sleep();
2153 
2154 	if (atomic_read(&net->ct.count) > 0) {
2155 		__nf_ct_unconfirmed_destroy(net);
2156 		nf_queue_nf_hook_drop(net);
2157 		synchronize_net();
2158 	}
2159 }
2160 EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy);
2161 
2162 void nf_ct_iterate_cleanup_net(struct net *net,
2163 			       int (*iter)(struct nf_conn *i, void *data),
2164 			       void *data, u32 portid, int report)
2165 {
2166 	struct iter_data d;
2167 
2168 	might_sleep();
2169 
2170 	if (atomic_read(&net->ct.count) == 0)
2171 		return;
2172 
2173 	d.iter = iter;
2174 	d.data = data;
2175 	d.net = net;
2176 
2177 	nf_ct_iterate_cleanup(iter_net_only, &d, portid, report);
2178 }
2179 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net);
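
/* Typical use (illustrative): flush entries of one namespace that
 * match a predicate such as the kill_by_mark() sketch above, e.g.
 * from a netdevice notifier:
 *
 *	nf_ct_iterate_cleanup_net(net, kill_by_mark, &mark, 0, 0);
 *
 * portid/report only seed the netlink destroy-event metadata, so
 * kernel-internal callers simply pass 0/0.
 */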
2180 
2181 /**
2182  * nf_ct_iterate_destroy - destroy unconfirmed conntracks and iterate table
2183  * @iter: callback to invoke for each conntrack
2184  * @data: data to pass to @iter
2185  *
2186  * Like nf_ct_iterate_cleanup, but first marks conntracks on the
2187  * unconfirmed list as dying (so they will not be inserted into
2188  * the main table).
2189  *
2190  * Can only be called in module exit path.
2191  */
2192 void
2193 nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data)
2194 {
2195 	struct net *net;
2196 
2197 	down_read(&net_rwsem);
2198 	for_each_net(net) {
2199 		if (atomic_read(&net->ct.count) == 0)
2200 			continue;
2201 		__nf_ct_unconfirmed_destroy(net);
2202 		nf_queue_nf_hook_drop(net);
2203 	}
2204 	up_read(&net_rwsem);
2205 
2206 	/* Need to wait for the netns cleanup worker to finish, if it's
2207 	 * running -- it might have deleted a net namespace from
2208 	 * the global list, so our __nf_ct_unconfirmed_destroy() might
2209 	 * not have affected all namespaces.
2210 	 */
2211 	net_ns_barrier();
2212 
2213 	/* A conntrack could have been unlinked from the unconfirmed list
2214 	 * before we grabbed the pcpu lock in __nf_ct_unconfirmed_destroy().
2215 	 * This makes sure it's inserted into the conntrack table.
2216 	 */
2217 	synchronize_net();
2218 
2219 	nf_ct_iterate_cleanup(iter, data, 0, 0);
2220 }
2221 EXPORT_SYMBOL_GPL(nf_ct_iterate_destroy);
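
/* Illustration: a module that attached state to conntracks must strip
 * it on unload; the helper core, for instance, does roughly
 *
 *	nf_ct_iterate_destroy(unhelp, me);
 *
 * where unhelp() detaches the helper from any conntrack still using
 * it.  Unlike the pernet variant above, this walks every namespace.
 */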
2222 
2223 static int kill_all(struct nf_conn *i, void *data)
2224 {
2225 	return net_eq(nf_ct_net(i), data);
2226 }
2227 
2228 void nf_conntrack_cleanup_start(void)
2229 {
2230 	conntrack_gc_work.exiting = true;
2231 	RCU_INIT_POINTER(ip_ct_attach, NULL);
2232 }
2233 
2234 void nf_conntrack_cleanup_end(void)
2235 {
2236 	RCU_INIT_POINTER(nf_ct_hook, NULL);
2237 	cancel_delayed_work_sync(&conntrack_gc_work.dwork);
2238 	kvfree(nf_conntrack_hash);
2239 
2240 	nf_conntrack_proto_fini();
2241 	nf_conntrack_seqadj_fini();
2242 	nf_conntrack_labels_fini();
2243 	nf_conntrack_helper_fini();
2244 	nf_conntrack_timeout_fini();
2245 	nf_conntrack_ecache_fini();
2246 	nf_conntrack_tstamp_fini();
2247 	nf_conntrack_acct_fini();
2248 	nf_conntrack_expect_fini();
2249 
2250 	kmem_cache_destroy(nf_conntrack_cachep);
2251 }
2252 
2253 /*
2254  * Mishearing the voices in his head, our hero wonders how he's
2255  * supposed to kill the mall.
2256  */
2257 void nf_conntrack_cleanup_net(struct net *net)
2258 {
2259 	LIST_HEAD(single);
2260 
2261 	list_add(&net->exit_list, &single);
2262 	nf_conntrack_cleanup_net_list(&single);
2263 }
2264 
2265 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
2266 {
2267 	int busy;
2268 	struct net *net;
2269 
2270 	/*
2271 	 * This makes sure all current packets have passed through
2272 	 * the netfilter framework.  Roll on, two-stage module
2273 	 * delete...
2274 	 */
2275 	synchronize_net();
2276 i_see_dead_people:
2277 	busy = 0;
2278 	list_for_each_entry(net, net_exit_list, exit_list) {
2279 		nf_ct_iterate_cleanup(kill_all, net, 0, 0);
2280 		if (atomic_read(&net->ct.count) != 0)
2281 			busy = 1;
2282 	}
2283 	if (busy) {
2284 		schedule();
2285 		goto i_see_dead_people;
2286 	}
2287 
2288 	list_for_each_entry(net, net_exit_list, exit_list) {
2289 		nf_conntrack_proto_pernet_fini(net);
2290 		nf_conntrack_ecache_pernet_fini(net);
2291 		nf_conntrack_expect_pernet_fini(net);
2292 		free_percpu(net->ct.stat);
2293 		free_percpu(net->ct.pcpu_lists);
2294 	}
2295 }
2296 
2297 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
2298 {
2299 	struct hlist_nulls_head *hash;
2300 	unsigned int nr_slots, i;
2301 
2302 	if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
2303 		return NULL;
2304 
2305 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
2306 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
2307 
2308 	hash = kvmalloc_array(nr_slots, sizeof(struct hlist_nulls_head),
2309 			      GFP_KERNEL | __GFP_ZERO);
2310 
2311 	if (hash && nulls)
2312 		for (i = 0; i < nr_slots; i++)
2313 			INIT_HLIST_NULLS_HEAD(&hash[i], i);
2314 
2315 	return hash;
2316 }
2317 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
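
/* Background on the nulls markers initialized above: each chain in an
 * hlist_nulls table ends in a distinct "nulls" value (here the bucket
 * index i) instead of NULL, so a lockless RCU lookup can detect that
 * an entry was moved to another chain mid-walk and restart, e.g.:
 *
 *	hlist_nulls_for_each_entry_rcu(h, n, &hash[bucket], hnnode)
 *		...
 *	if (get_nulls_value(n) != bucket)
 *		goto begin;	// ended on the wrong chain: restart
 *
 * which is the pattern the lookup side of this file relies on.
 */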
2318 
2319 int nf_conntrack_hash_resize(unsigned int hashsize)
2320 {
2321 	int i, bucket;
2322 	unsigned int old_size;
2323 	struct hlist_nulls_head *hash, *old_hash;
2324 	struct nf_conntrack_tuple_hash *h;
2325 	struct nf_conn *ct;
2326 
2327 	if (!hashsize)
2328 		return -EINVAL;
2329 
2330 	hash = nf_ct_alloc_hashtable(&hashsize, 1);
2331 	if (!hash)
2332 		return -ENOMEM;
2333 
2334 	old_size = nf_conntrack_htable_size;
2335 	if (old_size == hashsize) {
2336 		kvfree(hash);
2337 		return 0;
2338 	}
2339 
2340 	local_bh_disable();
2341 	nf_conntrack_all_lock();
2342 	write_seqcount_begin(&nf_conntrack_generation);
2343 
2344 	/* Lookups in the old hash might happen in parallel, which means we
2345 	 * might get false negatives during connection lookup. New connections
2346 	 * created because of a false negative won't make it into the hash
2347 	 * though, since that would require taking the locks.
2348 	 */
2349 
2350 	for (i = 0; i < nf_conntrack_htable_size; i++) {
2351 		while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
2352 			h = hlist_nulls_entry(nf_conntrack_hash[i].first,
2353 					      struct nf_conntrack_tuple_hash, hnnode);
2354 			ct = nf_ct_tuplehash_to_ctrack(h);
2355 			hlist_nulls_del_rcu(&h->hnnode);
2356 			bucket = __hash_conntrack(nf_ct_net(ct),
2357 						  &h->tuple, hashsize);
2358 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
2359 		}
2360 	}
2361 	old_size = nf_conntrack_htable_size;
2362 	old_hash = nf_conntrack_hash;
2363 
2364 	nf_conntrack_hash = hash;
2365 	nf_conntrack_htable_size = hashsize;
2366 
2367 	write_seqcount_end(&nf_conntrack_generation);
2368 	nf_conntrack_all_unlock();
2369 	local_bh_enable();
2370 
2371 	synchronize_net();
2372 	kvfree(old_hash);
2373 	return 0;
2374 }
2375 
2376 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
2377 {
2378 	unsigned int hashsize;
2379 	int rc;
2380 
2381 	if (current->nsproxy->net_ns != &init_net)
2382 		return -EOPNOTSUPP;
2383 
2384 	/* On boot, we can set this without any fancy locking. */
2385 	if (!nf_conntrack_hash)
2386 		return param_set_uint(val, kp);
2387 
2388 	rc = kstrtouint(val, 0, &hashsize);
2389 	if (rc)
2390 		return rc;
2391 
2392 	return nf_conntrack_hash_resize(hashsize);
2393 }
2394 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
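
/* Runtime usage (illustrative): from the init netns the table can be
 * resized through the module parameter, e.g.
 *
 *	echo 131072 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which lands here and, once the hash exists, in
 * nf_conntrack_hash_resize().  This only resizes the table; the entry
 * limit is the separate net.netfilter.nf_conntrack_max sysctl.
 */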
2395 
2396 static __always_inline unsigned int total_extension_size(void)
2397 {
2398 	/* remember to add new extensions below */
2399 	BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
2400 
2401 	return sizeof(struct nf_ct_ext) +
2402 	       sizeof(struct nf_conn_help)
2403 #if IS_ENABLED(CONFIG_NF_NAT)
2404 		+ sizeof(struct nf_conn_nat)
2405 #endif
2406 		+ sizeof(struct nf_conn_seqadj)
2407 		+ sizeof(struct nf_conn_acct)
2408 #ifdef CONFIG_NF_CONNTRACK_EVENTS
2409 		+ sizeof(struct nf_conntrack_ecache)
2410 #endif
2411 #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
2412 		+ sizeof(struct nf_conn_tstamp)
2413 #endif
2414 #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
2415 		+ sizeof(struct nf_conn_timeout)
2416 #endif
2417 #ifdef CONFIG_NF_CONNTRACK_LABELS
2418 		+ sizeof(struct nf_conn_labels)
2419 #endif
2420 #if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
2421 		+ sizeof(struct nf_conn_synproxy)
2422 #endif
2423 	;
2424 }
2425 
2426 int nf_conntrack_init_start(void)
2427 {
2428 	unsigned long nr_pages = totalram_pages();
2429 	int max_factor = 8;
2430 	int ret = -ENOMEM;
2431 	int i;
2432 
2433 	/* struct nf_ct_ext uses u8 to store offsets/size */
2434 	BUILD_BUG_ON(total_extension_size() > 255u);
2435 
2436 	seqcount_init(&nf_conntrack_generation);
2437 
2438 	for (i = 0; i < CONNTRACK_LOCKS; i++)
2439 		spin_lock_init(&nf_conntrack_locks[i]);
2440 
2441 	if (!nf_conntrack_htable_size) {
2442 		/* Idea from tcp.c: use 1/16384 of memory.
2443 		 * On i386: 32MB machine has 512 buckets.
2444 		 * >= 1GB machines have 16384 buckets.
2445 		 * >= 4GB machines have 65536 buckets.
2446 		 */
2447 		nf_conntrack_htable_size
2448 			= (((nr_pages << PAGE_SHIFT) / 16384)
2449 			   / sizeof(struct hlist_head));
2450 		if (nr_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
2451 			nf_conntrack_htable_size = 65536;
2452 		else if (nr_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
2453 			nf_conntrack_htable_size = 16384;
2454 		if (nf_conntrack_htable_size < 32)
2455 			nf_conntrack_htable_size = 32;
2456 
2457 		/* Use a max. factor of four by default to get the same max as
2458 		 * with the old struct list_heads. When a table size is given
2459 		 * we use the old value of 8 to avoid reducing the max.
2460 		 * entries. */
2461 		max_factor = 4;
2462 	}
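
	/* Worked example of the sizing above, assuming 4KiB pages: a 2GiB
	 * machine has nr_pages = 524288, so (2^31 / 16384) / 8 = 16384
	 * buckets (the >= 1GB override picks the same value), and with
	 * max_factor = 4 nf_conntrack_max below becomes 65536 entries.
	 */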
2463 
2464 	nf_conntrack_hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size, 1);
2465 	if (!nf_conntrack_hash)
2466 		return -ENOMEM;
2467 
2468 	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
2469 
2470 	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
2471 						sizeof(struct nf_conn),
2472 						NFCT_INFOMASK + 1,
2473 						SLAB_TYPESAFE_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
2474 	if (!nf_conntrack_cachep)
2475 		goto err_cachep;
2476 
2477 	ret = nf_conntrack_expect_init();
2478 	if (ret < 0)
2479 		goto err_expect;
2480 
2481 	ret = nf_conntrack_acct_init();
2482 	if (ret < 0)
2483 		goto err_acct;
2484 
2485 	ret = nf_conntrack_tstamp_init();
2486 	if (ret < 0)
2487 		goto err_tstamp;
2488 
2489 	ret = nf_conntrack_ecache_init();
2490 	if (ret < 0)
2491 		goto err_ecache;
2492 
2493 	ret = nf_conntrack_timeout_init();
2494 	if (ret < 0)
2495 		goto err_timeout;
2496 
2497 	ret = nf_conntrack_helper_init();
2498 	if (ret < 0)
2499 		goto err_helper;
2500 
2501 	ret = nf_conntrack_labels_init();
2502 	if (ret < 0)
2503 		goto err_labels;
2504 
2505 	ret = nf_conntrack_seqadj_init();
2506 	if (ret < 0)
2507 		goto err_seqadj;
2508 
2509 	ret = nf_conntrack_proto_init();
2510 	if (ret < 0)
2511 		goto err_proto;
2512 
2513 	conntrack_gc_work_init(&conntrack_gc_work);
2514 	queue_delayed_work(system_power_efficient_wq, &conntrack_gc_work.dwork, HZ);
2515 
2516 	return 0;
2517 
2518 err_proto:
2519 	nf_conntrack_seqadj_fini();
2520 err_seqadj:
2521 	nf_conntrack_labels_fini();
2522 err_labels:
2523 	nf_conntrack_helper_fini();
2524 err_helper:
2525 	nf_conntrack_timeout_fini();
2526 err_timeout:
2527 	nf_conntrack_ecache_fini();
2528 err_ecache:
2529 	nf_conntrack_tstamp_fini();
2530 err_tstamp:
2531 	nf_conntrack_acct_fini();
2532 err_acct:
2533 	nf_conntrack_expect_fini();
2534 err_expect:
2535 	kmem_cache_destroy(nf_conntrack_cachep);
2536 err_cachep:
2537 	kvfree(nf_conntrack_hash);
2538 	return ret;
2539 }
2540 
2541 static struct nf_ct_hook nf_conntrack_hook = {
2542 	.update		= nf_conntrack_update,
2543 	.destroy	= destroy_conntrack,
2544 	.get_tuple_skb  = nf_conntrack_get_tuple_skb,
2545 };
2546 
2547 void nf_conntrack_init_end(void)
2548 {
2549 	/* For use by REJECT target */
2550 	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
2551 	RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
2552 }
2553 
2554 /*
2555  * We need to use special "null" values, not used in hash table
2556  */
2557 #define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
2558 #define DYING_NULLS_VAL		((1<<30)+1)
2559 #define TEMPLATE_NULLS_VAL	((1<<30)+2)
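
/* The bit-30 markers above terminate the per-cpu unconfirmed/dying
 * lists and template entries; hash chains instead end in their bucket
 * index (see nf_ct_alloc_hashtable() above), so a nulls-based walk can
 * tell which list it fell off of, e.g. (sketch):
 *
 *	if (get_nulls_value(n) == UNCONFIRMED_NULLS_VAL)
 *		// we were walking the unconfirmed list
 *
 * Since the hash size is bounded well below 2^30, the two value
 * ranges can never collide.
 */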
2560 
2561 int nf_conntrack_init_net(struct net *net)
2562 {
2563 	int ret = -ENOMEM;
2564 	int cpu;
2565 
2566 	BUILD_BUG_ON(IP_CT_UNTRACKED == IP_CT_NUMBER);
2567 	BUILD_BUG_ON_NOT_POWER_OF_2(CONNTRACK_LOCKS);
2568 	atomic_set(&net->ct.count, 0);
2569 
2570 	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
2571 	if (!net->ct.pcpu_lists)
2572 		goto err_stat;
2573 
2574 	for_each_possible_cpu(cpu) {
2575 		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
2576 
2577 		spin_lock_init(&pcpu->lock);
2578 		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
2579 		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
2580 	}
2581 
2582 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
2583 	if (!net->ct.stat)
2584 		goto err_pcpu_lists;
2585 
2586 	ret = nf_conntrack_expect_pernet_init(net);
2587 	if (ret < 0)
2588 		goto err_expect;
2589 
2590 	nf_conntrack_acct_pernet_init(net);
2591 	nf_conntrack_tstamp_pernet_init(net);
2592 	nf_conntrack_ecache_pernet_init(net);
2593 	nf_conntrack_helper_pernet_init(net);
2594 	nf_conntrack_proto_pernet_init(net);
2595 
2596 	return 0;
2597 
2598 err_expect:
2599 	free_percpu(net->ct.stat);
2600 err_pcpu_lists:
2601 	free_percpu(net->ct.pcpu_lists);
2602 err_stat:
2603 	return ret;
2604 }
2605