1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4 
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  */
14 
15 #include <linux/types.h>
16 #include <linux/netfilter.h>
17 #include <linux/module.h>
18 #include <linux/sched.h>
19 #include <linux/skbuff.h>
20 #include <linux/proc_fs.h>
21 #include <linux/vmalloc.h>
22 #include <linux/stddef.h>
23 #include <linux/slab.h>
24 #include <linux/random.h>
25 #include <linux/jhash.h>
26 #include <linux/siphash.h>
27 #include <linux/err.h>
28 #include <linux/percpu.h>
29 #include <linux/moduleparam.h>
30 #include <linux/notifier.h>
31 #include <linux/kernel.h>
32 #include <linux/netdevice.h>
33 #include <linux/socket.h>
34 #include <linux/mm.h>
35 #include <linux/nsproxy.h>
36 #include <linux/rculist_nulls.h>
37 
38 #include <net/netfilter/nf_conntrack.h>
39 #include <net/netfilter/nf_conntrack_l3proto.h>
40 #include <net/netfilter/nf_conntrack_l4proto.h>
41 #include <net/netfilter/nf_conntrack_expect.h>
42 #include <net/netfilter/nf_conntrack_helper.h>
43 #include <net/netfilter/nf_conntrack_seqadj.h>
44 #include <net/netfilter/nf_conntrack_core.h>
45 #include <net/netfilter/nf_conntrack_extend.h>
46 #include <net/netfilter/nf_conntrack_acct.h>
47 #include <net/netfilter/nf_conntrack_ecache.h>
48 #include <net/netfilter/nf_conntrack_zones.h>
49 #include <net/netfilter/nf_conntrack_timestamp.h>
50 #include <net/netfilter/nf_conntrack_timeout.h>
51 #include <net/netfilter/nf_conntrack_labels.h>
52 #include <net/netfilter/nf_conntrack_synproxy.h>
53 #include <net/netfilter/nf_nat.h>
54 #include <net/netfilter/nf_nat_core.h>
55 #include <net/netfilter/nf_nat_helper.h>
56 
57 #define NF_CONNTRACK_VERSION	"0.5.0"
58 
59 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
60 				      enum nf_nat_manip_type manip,
61 				      const struct nlattr *attr) __read_mostly;
62 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
63 
64 __cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
65 EXPORT_SYMBOL_GPL(nf_conntrack_locks);
66 
67 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
68 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
69 
70 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
71 {
72 	h1 %= CONNTRACK_LOCKS;
73 	h2 %= CONNTRACK_LOCKS;
74 	spin_unlock(&nf_conntrack_locks[h1]);
75 	if (h1 != h2)
76 		spin_unlock(&nf_conntrack_locks[h2]);
77 }
78 
79 /* return true if we need to recompute hashes (in case hash table was resized) */
80 static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
81 				     unsigned int h2, unsigned int sequence)
82 {
83 	h1 %= CONNTRACK_LOCKS;
84 	h2 %= CONNTRACK_LOCKS;
85 	if (h1 <= h2) {
86 		spin_lock(&nf_conntrack_locks[h1]);
87 		if (h1 != h2)
88 			spin_lock_nested(&nf_conntrack_locks[h2],
89 					 SINGLE_DEPTH_NESTING);
90 	} else {
91 		spin_lock(&nf_conntrack_locks[h2]);
92 		spin_lock_nested(&nf_conntrack_locks[h1],
93 				 SINGLE_DEPTH_NESTING);
94 	}
95 	if (read_seqcount_retry(&net->ct.generation, sequence)) {
96 		nf_conntrack_double_unlock(h1, h2);
97 		return true;
98 	}
99 	return false;
100 }
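/* Typical caller pattern (used by nf_ct_delete_from_lists() and
 * nf_conntrack_hash_check_insert() below): recompute both bucket hashes
 * under the generation seqcount and retry while a concurrent resize is
 * detected (tuple names here are illustrative only):
 *
 *	do {
 *		sequence = read_seqcount_begin(&net->ct.generation);
 *		hash = hash_conntrack(net, &orig_tuple);
 *		reply_hash = hash_conntrack(net, &reply_tuple);
 *	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
 */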
101 
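/* Take every bucket lock, in index order, so that the whole table is
 * quiesced; paired with nf_conntrack_all_unlock().  Used below by
 * nf_conntrack_set_hashsize() while swapping in a resized hash table.
 */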
102 static void nf_conntrack_all_lock(void)
103 {
104 	int i;
105 
106 	for (i = 0; i < CONNTRACK_LOCKS; i++)
107 		spin_lock_nested(&nf_conntrack_locks[i], i);
108 }
109 
110 static void nf_conntrack_all_unlock(void)
111 {
112 	int i;
113 
114 	for (i = 0; i < CONNTRACK_LOCKS; i++)
115 		spin_unlock(&nf_conntrack_locks[i]);
116 }
117 
118 unsigned int nf_conntrack_htable_size __read_mostly;
119 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
120 
121 unsigned int nf_conntrack_max __read_mostly;
122 EXPORT_SYMBOL_GPL(nf_conntrack_max);
123 
124 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
125 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
126 
127 unsigned int nf_conntrack_hash_rnd __read_mostly;
128 EXPORT_SYMBOL_GPL(nf_conntrack_hash_rnd);
129 
130 static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple)
131 {
132 	unsigned int n;
133 
134 	/* The direction must be ignored, so we hash everything up to the
135 	 * destination ports (which is a multiple of 4) and treat the last
136 	 * three bytes manually.
137 	 */
138 	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
139 	return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^
140 		      (((__force __u16)tuple->dst.u.all << 16) |
141 		      tuple->dst.protonum));
142 }
143 
144 static u32 __hash_bucket(u32 hash, unsigned int size)
145 {
146 	return reciprocal_scale(hash, size);
147 }
148 
149 static u32 hash_bucket(u32 hash, const struct net *net)
150 {
151 	return __hash_bucket(hash, net->ct.htable_size);
152 }
153 
154 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
155 				  unsigned int size)
156 {
157 	return __hash_bucket(hash_conntrack_raw(tuple), size);
158 }
159 
160 static inline u_int32_t hash_conntrack(const struct net *net,
161 				       const struct nf_conntrack_tuple *tuple)
162 {
163 	return __hash_conntrack(tuple, net->ct.htable_size);
164 }
165 
166 bool
167 nf_ct_get_tuple(const struct sk_buff *skb,
168 		unsigned int nhoff,
169 		unsigned int dataoff,
170 		u_int16_t l3num,
171 		u_int8_t protonum,
172 		struct net *net,
173 		struct nf_conntrack_tuple *tuple,
174 		const struct nf_conntrack_l3proto *l3proto,
175 		const struct nf_conntrack_l4proto *l4proto)
176 {
177 	memset(tuple, 0, sizeof(*tuple));
178 
179 	tuple->src.l3num = l3num;
180 	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
181 		return false;
182 
183 	tuple->dst.protonum = protonum;
184 	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
185 
186 	return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
187 }
188 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
189 
190 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
191 		       u_int16_t l3num,
192 		       struct net *net, struct nf_conntrack_tuple *tuple)
193 {
194 	struct nf_conntrack_l3proto *l3proto;
195 	struct nf_conntrack_l4proto *l4proto;
196 	unsigned int protoff;
197 	u_int8_t protonum;
198 	int ret;
199 
200 	rcu_read_lock();
201 
202 	l3proto = __nf_ct_l3proto_find(l3num);
203 	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
204 	if (ret != NF_ACCEPT) {
205 		rcu_read_unlock();
206 		return false;
207 	}
208 
209 	l4proto = __nf_ct_l4proto_find(l3num, protonum);
210 
211 	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
212 			      l3proto, l4proto);
213 
214 	rcu_read_unlock();
215 	return ret;
216 }
217 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
218 
219 bool
220 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
221 		   const struct nf_conntrack_tuple *orig,
222 		   const struct nf_conntrack_l3proto *l3proto,
223 		   const struct nf_conntrack_l4proto *l4proto)
224 {
225 	memset(inverse, 0, sizeof(*inverse));
226 
227 	inverse->src.l3num = orig->src.l3num;
228 	if (l3proto->invert_tuple(inverse, orig) == 0)
229 		return false;
230 
231 	inverse->dst.dir = !orig->dst.dir;
232 
233 	inverse->dst.protonum = orig->dst.protonum;
234 	return l4proto->invert_tuple(inverse, orig);
235 }
236 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
237 
238 /* Generate an almost-unique pseudo-id for a given conntrack.
239  *
240  * Intentionally doesn't re-use any of the seeds used for hash
241  * table location; we assume the id gets exposed to userspace.
242  *
243  * The following nf_conn items do not change throughout the lifetime
244  * of the nf_conn:
245  *
246  * 1. nf_conn address
247  * 2. nf_conn->master address (normally NULL)
248  * 3. the associated net namespace
249  * 4. the original direction tuple
250  */
251 u32 nf_ct_get_id(const struct nf_conn *ct)
252 {
253 	static __read_mostly siphash_key_t ct_id_seed;
254 	unsigned long a, b, c, d;
255 
256 	net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
257 
258 	a = (unsigned long)ct;
259 	b = (unsigned long)ct->master;
260 	c = (unsigned long)nf_ct_net(ct);
261 	d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
262 				   sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
263 				   &ct_id_seed);
264 #ifdef CONFIG_64BIT
265 	return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
266 #else
267 	return siphash_4u32((u32)a, (u32)b, (u32)c, (u32)d, &ct_id_seed);
268 #endif
269 }
270 EXPORT_SYMBOL_GPL(nf_ct_get_id);
271 
272 static void
273 clean_from_lists(struct nf_conn *ct)
274 {
275 	pr_debug("clean_from_lists(%p)\n", ct);
276 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
277 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
278 
279 	/* Destroy all pending expectations */
280 	nf_ct_remove_expectations(ct);
281 }
282 
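/* Lifecycle of the per-cpu lists used below: __nf_conntrack_alloc() leaves a
 * new entry unhashed, init_conntrack() places it on the per-cpu unconfirmed
 * list, __nf_conntrack_confirm() unlinks it again before inserting it into
 * the real hash table, and entries being torn down sit on the per-cpu dying
 * list until destroy_conntrack() runs.  The ORIGINAL-direction tuplehash
 * node doubles as the link for both lists.
 */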
283 /* must be called with local_bh_disable */
284 static void nf_ct_add_to_dying_list(struct nf_conn *ct)
285 {
286 	struct ct_pcpu *pcpu;
287 
288 	/* add this conntrack to the (per cpu) dying list */
289 	ct->cpu = smp_processor_id();
290 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
291 
292 	spin_lock(&pcpu->lock);
293 	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
294 			     &pcpu->dying);
295 	spin_unlock(&pcpu->lock);
296 }
297 
298 /* must be called with local_bh_disable */
299 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
300 {
301 	struct ct_pcpu *pcpu;
302 
303 	/* add this conntrack to the (per cpu) unconfirmed list */
304 	ct->cpu = smp_processor_id();
305 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
306 
307 	spin_lock(&pcpu->lock);
308 	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
309 			     &pcpu->unconfirmed);
310 	spin_unlock(&pcpu->lock);
311 }
312 
313 /* must be called with local_bh_disable */
314 static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
315 {
316 	struct ct_pcpu *pcpu;
317 
318 	/* We overload the first tuple to link into the unconfirmed or dying list. */
319 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
320 
321 	spin_lock(&pcpu->lock);
322 	BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
323 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
324 	spin_unlock(&pcpu->lock);
325 }
326 
327 /* Released via destroy_conntrack() */
328 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
329 				 const struct nf_conntrack_zone *zone,
330 				 gfp_t flags)
331 {
332 	struct nf_conn *tmpl;
333 
334 	tmpl = kzalloc(sizeof(*tmpl), flags);
335 	if (tmpl == NULL)
336 		return NULL;
337 
338 	tmpl->status = IPS_TEMPLATE;
339 	write_pnet(&tmpl->ct_net, net);
340 
341 	if (nf_ct_zone_add(tmpl, flags, zone) < 0)
342 		goto out_free;
343 
344 	atomic_set(&tmpl->ct_general.use, 0);
345 
346 	return tmpl;
347 out_free:
348 	kfree(tmpl);
349 	return NULL;
350 }
351 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
352 
353 void nf_ct_tmpl_free(struct nf_conn *tmpl)
354 {
355 	nf_ct_ext_destroy(tmpl);
356 	nf_ct_ext_free(tmpl);
357 	kfree(tmpl);
358 }
359 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
360 
361 static void
362 destroy_conntrack(struct nf_conntrack *nfct)
363 {
364 	struct nf_conn *ct = (struct nf_conn *)nfct;
365 	struct net *net = nf_ct_net(ct);
366 	struct nf_conntrack_l4proto *l4proto;
367 
368 	pr_debug("destroy_conntrack(%p)\n", ct);
369 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
370 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
371 
372 	if (unlikely(nf_ct_is_template(ct))) {
373 		nf_ct_tmpl_free(ct);
374 		return;
375 	}
376 	rcu_read_lock();
377 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
378 	if (l4proto && l4proto->destroy)
379 		l4proto->destroy(ct);
380 
381 	rcu_read_unlock();
382 
383 	local_bh_disable();
384 	/* Expectations will have been removed in clean_from_lists,
385 	 * except TFTP can create an expectation on the first packet,
386 	 * before connection is in the list, so we need to clean here,
387 	 * too.
388 	 */
389 	nf_ct_remove_expectations(ct);
390 
391 	nf_ct_del_from_dying_or_unconfirmed_list(ct);
392 
393 	NF_CT_STAT_INC(net, delete);
394 	local_bh_enable();
395 
396 	if (ct->master)
397 		nf_ct_put(ct->master);
398 
399 	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
400 	nf_conntrack_free(ct);
401 }
402 
403 static void nf_ct_delete_from_lists(struct nf_conn *ct)
404 {
405 	struct net *net = nf_ct_net(ct);
406 	unsigned int hash, reply_hash;
407 	unsigned int sequence;
408 
409 	nf_ct_helper_destroy(ct);
410 
411 	local_bh_disable();
412 	do {
413 		sequence = read_seqcount_begin(&net->ct.generation);
414 		hash = hash_conntrack(net,
415 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
416 		reply_hash = hash_conntrack(net,
417 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
418 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
419 
420 	clean_from_lists(ct);
421 	nf_conntrack_double_unlock(hash, reply_hash);
422 
423 	nf_ct_add_to_dying_list(ct);
424 
425 	NF_CT_STAT_INC(net, delete_list);
426 	local_bh_enable();
427 }
428 
429 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
430 {
431 	struct nf_conn_tstamp *tstamp;
432 
433 	tstamp = nf_conn_tstamp_find(ct);
434 	if (tstamp && tstamp->stop == 0)
435 		tstamp->stop = ktime_get_real_ns();
436 
437 	if (nf_ct_is_dying(ct))
438 		goto delete;
439 
440 	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
441 				    portid, report) < 0) {
442 		/* destroy event was not delivered */
443 		nf_ct_delete_from_lists(ct);
444 		nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
445 		return false;
446 	}
447 
448 	nf_conntrack_ecache_work(nf_ct_net(ct));
449 	set_bit(IPS_DYING_BIT, &ct->status);
450  delete:
451 	nf_ct_delete_from_lists(ct);
452 	nf_ct_put(ct);
453 	return true;
454 }
455 EXPORT_SYMBOL_GPL(nf_ct_delete);
456 
457 static void death_by_timeout(unsigned long ul_conntrack)
458 {
459 	nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
460 }
461 
462 static inline bool
463 nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
464 		const struct nf_conntrack_tuple *tuple,
465 		const struct nf_conntrack_zone *zone)
466 {
467 	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
468 
469 	/* A conntrack can be recreated with an equal tuple,
470 	 * so we need to check that the conntrack is confirmed
471 	 */
472 	return nf_ct_tuple_equal(tuple, &h->tuple) &&
473 	       nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
474 	       nf_ct_is_confirmed(ct);
475 }
476 
477 /*
478  * Warning :
479  * - Caller must take a reference on returned object
480  *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
481  */
482 static struct nf_conntrack_tuple_hash *
483 ____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
484 		      const struct nf_conntrack_tuple *tuple, u32 hash)
485 {
486 	struct nf_conntrack_tuple_hash *h;
487 	struct hlist_nulls_node *n;
488 	unsigned int bucket = hash_bucket(hash, net);
489 
490 	/* Disable BHs the entire time since we normally need to disable them
491 	 * at least once for the stats anyway.
492 	 */
493 	local_bh_disable();
494 begin:
495 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
496 		if (nf_ct_key_equal(h, tuple, zone)) {
497 			NF_CT_STAT_INC(net, found);
498 			local_bh_enable();
499 			return h;
500 		}
501 		NF_CT_STAT_INC(net, searched);
502 	}
503 	/*
504 	 * If the nulls value we got at the end of this lookup is
505 	 * not the expected one, we must restart the lookup:
506 	 * we probably met an item that was moved to another chain.
507 	 */
508 	if (get_nulls_value(n) != bucket) {
509 		NF_CT_STAT_INC(net, search_restart);
510 		goto begin;
511 	}
512 	local_bh_enable();
513 
514 	return NULL;
515 }
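/* __nf_conntrack_find_get() below shows the reference protocol required by
 * the warning above: take a reference with atomic_inc_not_zero(), then
 * re-check nf_ct_key_equal() and drop the reference (and retry) if the entry
 * was recycled in the meantime.
 */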
516 
517 /* Find a connection corresponding to a tuple. */
518 static struct nf_conntrack_tuple_hash *
519 __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
520 			const struct nf_conntrack_tuple *tuple, u32 hash)
521 {
522 	struct nf_conntrack_tuple_hash *h;
523 	struct nf_conn *ct;
524 
525 	rcu_read_lock();
526 begin:
527 	h = ____nf_conntrack_find(net, zone, tuple, hash);
528 	if (h) {
529 		ct = nf_ct_tuplehash_to_ctrack(h);
530 		if (unlikely(nf_ct_is_dying(ct) ||
531 			     !atomic_inc_not_zero(&ct->ct_general.use)))
532 			h = NULL;
533 		else {
534 			if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
535 				nf_ct_put(ct);
536 				goto begin;
537 			}
538 		}
539 	}
540 	rcu_read_unlock();
541 
542 	return h;
543 }
544 
545 struct nf_conntrack_tuple_hash *
546 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
547 		      const struct nf_conntrack_tuple *tuple)
548 {
549 	return __nf_conntrack_find_get(net, zone, tuple,
550 				       hash_conntrack_raw(tuple));
551 }
552 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
553 
554 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
555 				       unsigned int hash,
556 				       unsigned int reply_hash)
557 {
558 	struct net *net = nf_ct_net(ct);
559 
560 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
561 			   &net->ct.hash[hash]);
562 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
563 			   &net->ct.hash[reply_hash]);
564 }
565 
566 int
567 nf_conntrack_hash_check_insert(struct nf_conn *ct)
568 {
569 	const struct nf_conntrack_zone *zone;
570 	struct net *net = nf_ct_net(ct);
571 	unsigned int hash, reply_hash;
572 	struct nf_conntrack_tuple_hash *h;
573 	struct hlist_nulls_node *n;
574 	unsigned int sequence;
575 
576 	zone = nf_ct_zone(ct);
577 
578 	local_bh_disable();
579 	do {
580 		sequence = read_seqcount_begin(&net->ct.generation);
581 		hash = hash_conntrack(net,
582 				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
583 		reply_hash = hash_conntrack(net,
584 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
585 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
586 
587 	/* See if there's one in the list already, including reverse */
588 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
589 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
590 				      &h->tuple) &&
591 		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
592 				     NF_CT_DIRECTION(h)))
593 			goto out;
594 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
595 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
596 				      &h->tuple) &&
597 		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
598 				     NF_CT_DIRECTION(h)))
599 			goto out;
600 
601 	add_timer(&ct->timeout);
602 	smp_wmb();
603 	/* The caller holds a reference to this object */
604 	atomic_set(&ct->ct_general.use, 2);
605 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
606 	nf_conntrack_double_unlock(hash, reply_hash);
607 	NF_CT_STAT_INC(net, insert);
608 	local_bh_enable();
609 	return 0;
610 
611 out:
612 	nf_conntrack_double_unlock(hash, reply_hash);
613 	NF_CT_STAT_INC(net, insert_failed);
614 	local_bh_enable();
615 	return -EEXIST;
616 }
617 EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
618 
619 /* Confirm a connection given skb; places it in hash table */
620 int
621 __nf_conntrack_confirm(struct sk_buff *skb)
622 {
623 	const struct nf_conntrack_zone *zone;
624 	unsigned int hash, reply_hash;
625 	struct nf_conntrack_tuple_hash *h;
626 	struct nf_conn *ct;
627 	struct nf_conn_help *help;
628 	struct nf_conn_tstamp *tstamp;
629 	struct hlist_nulls_node *n;
630 	enum ip_conntrack_info ctinfo;
631 	struct net *net;
632 	unsigned int sequence;
633 
634 	ct = nf_ct_get(skb, &ctinfo);
635 	net = nf_ct_net(ct);
636 
637 	/* ipt_REJECT uses nf_conntrack_attach to attach related
638 	   ICMP/TCP RST packets in the other direction.  The actual packet
639 	   which created the connection will be IP_CT_NEW or, for an
640 	   expected connection, IP_CT_RELATED. */
641 	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
642 		return NF_ACCEPT;
643 
644 	zone = nf_ct_zone(ct);
645 	local_bh_disable();
646 
647 	do {
648 		sequence = read_seqcount_begin(&net->ct.generation);
649 		/* reuse the hash saved before */
650 		hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
651 		hash = hash_bucket(hash, net);
652 		reply_hash = hash_conntrack(net,
653 					   &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
654 
655 	} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
656 
657 	/* We're not in hash table, and we refuse to set up related
658 	 * connections for unconfirmed conns.  But packet copies and
659 	 * REJECT will give spurious warnings here.
660 	 */
661 	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
662 
663 	/* No external references means no one else could have
664 	 * confirmed us.
665 	 */
666 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
667 	pr_debug("Confirming conntrack %p\n", ct);
668 	/* We have to check the DYING flag after unlink to prevent
669 	 * a race against nf_ct_get_next_corpse() possibly called from
670 	 * user context, else we insert an already 'dead' hash, blocking
671 	 * further use of that particular connection -JM.
672 	 */
673 	nf_ct_del_from_dying_or_unconfirmed_list(ct);
674 
675 	if (unlikely(nf_ct_is_dying(ct)))
676 		goto out;
677 
678 	/* See if there's one in the list already, including reverse:
679 	   NAT could have grabbed it without realizing, since we're
680 	   not in the hash.  If there is, we lost the race. */
681 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[hash], hnnode)
682 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
683 				      &h->tuple) &&
684 		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
685 				     NF_CT_DIRECTION(h)))
686 			goto out;
687 	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
688 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
689 				      &h->tuple) &&
690 		    nf_ct_zone_equal(nf_ct_tuplehash_to_ctrack(h), zone,
691 				     NF_CT_DIRECTION(h)))
692 			goto out;
693 
694 	/* Timer relative to confirmation time, not original
695 	   setting time, otherwise we'd get timer wrap in
696 	   weird delay cases. */
697 	ct->timeout.expires += jiffies;
698 	add_timer(&ct->timeout);
699 	atomic_inc(&ct->ct_general.use);
700 	ct->status |= IPS_CONFIRMED;
701 
702 	/* set conntrack timestamp, if enabled. */
703 	tstamp = nf_conn_tstamp_find(ct);
704 	if (tstamp) {
705 		if (skb->tstamp.tv64 == 0)
706 			__net_timestamp(skb);
707 
708 		tstamp->start = ktime_to_ns(skb->tstamp);
709 	}
710 	/* Since the lookup is lockless, hash insertion must be done after
711 	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
712 	 * guarantee that no other CPU can find the conntrack before the above
713 	 * stores are visible.
714 	 */
715 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
716 	nf_conntrack_double_unlock(hash, reply_hash);
717 	NF_CT_STAT_INC(net, insert);
718 	local_bh_enable();
719 
720 	help = nfct_help(ct);
721 	if (help && help->helper)
722 		nf_conntrack_event_cache(IPCT_HELPER, ct);
723 
724 	nf_conntrack_event_cache(master_ct(ct) ?
725 				 IPCT_RELATED : IPCT_NEW, ct);
726 	return NF_ACCEPT;
727 
728 out:
729 	nf_ct_add_to_dying_list(ct);
730 	nf_conntrack_double_unlock(hash, reply_hash);
731 	NF_CT_STAT_INC(net, insert_failed);
732 	local_bh_enable();
733 	return NF_DROP;
734 }
735 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
736 
737 /* Returns true if a connection corresponds to the tuple (required
738    for NAT). */
739 int
740 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
741 			 const struct nf_conn *ignored_conntrack)
742 {
743 	struct net *net = nf_ct_net(ignored_conntrack);
744 	const struct nf_conntrack_zone *zone;
745 	struct nf_conntrack_tuple_hash *h;
746 	struct hlist_nulls_node *n;
747 	struct nf_conn *ct;
748 	unsigned int hash;
749 
750 	zone = nf_ct_zone(ignored_conntrack);
751 	hash = hash_conntrack(net, tuple);
752 
753 	/* Disable BHs the entire time since we need to disable them at
754 	 * least once for the stats anyway.
755 	 */
756 	rcu_read_lock_bh();
757  begin:
758 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
759 		ct = nf_ct_tuplehash_to_ctrack(h);
760 		if (ct != ignored_conntrack &&
761 		    nf_ct_tuple_equal(tuple, &h->tuple) &&
762 		    nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h))) {
763 			NF_CT_STAT_INC(net, found);
764 			rcu_read_unlock_bh();
765 			return 1;
766 		}
767 		NF_CT_STAT_INC(net, searched);
768 	}
769 
770 	if (get_nulls_value(n) != hash) {
771 		NF_CT_STAT_INC(net, search_restart);
772 		goto begin;
773 	}
774 
775 	rcu_read_unlock_bh();
776 
777 	return 0;
778 }
779 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
780 
781 #define NF_CT_EVICTION_RANGE	8
782 
783 /* There's a small race here where we may free a just-assured
784    connection.  Too bad: we're in trouble anyway. */
785 static noinline int early_drop(struct net *net, unsigned int _hash)
786 {
787 	/* Use oldest entry, which is roughly LRU */
788 	struct nf_conntrack_tuple_hash *h;
789 	struct nf_conn *ct = NULL, *tmp;
790 	struct hlist_nulls_node *n;
791 	unsigned int i = 0, cnt = 0;
792 	int dropped = 0;
793 	unsigned int hash, sequence;
794 	spinlock_t *lockp;
795 
796 	local_bh_disable();
797 restart:
798 	sequence = read_seqcount_begin(&net->ct.generation);
799 	hash = hash_bucket(_hash, net);
800 	for (; i < net->ct.htable_size; i++) {
801 		lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
802 		spin_lock(lockp);
803 		if (read_seqcount_retry(&net->ct.generation, sequence)) {
804 			spin_unlock(lockp);
805 			goto restart;
806 		}
807 		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
808 					 hnnode) {
809 			tmp = nf_ct_tuplehash_to_ctrack(h);
810 			if (!test_bit(IPS_ASSURED_BIT, &tmp->status) &&
811 			    !nf_ct_is_dying(tmp) &&
812 			    atomic_inc_not_zero(&tmp->ct_general.use)) {
813 				ct = tmp;
814 				break;
815 			}
816 			cnt++;
817 		}
818 
819 		hash = (hash + 1) % net->ct.htable_size;
820 		spin_unlock(lockp);
821 
822 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
823 			break;
824 
825 	}
826 	local_bh_enable();
827 
828 	if (!ct)
829 		return dropped;
830 
831 	if (del_timer(&ct->timeout)) {
832 		if (nf_ct_delete(ct, 0, 0)) {
833 			dropped = 1;
834 			NF_CT_STAT_INC_ATOMIC(net, early_drop);
835 		}
836 	}
837 	nf_ct_put(ct);
838 	return dropped;
839 }
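/* early_drop() is only called from __nf_conntrack_alloc() below, once
 * net->ct.count exceeds nf_conntrack_max; it scans up to
 * NF_CT_EVICTION_RANGE entries starting at the bucket the new conntrack
 * would hash to and evicts the first non-assured, non-dying candidate.
 */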
840 
841 void init_nf_conntrack_hash_rnd(void)
842 {
843 	unsigned int rand;
844 
845 	/*
846 	 * Why not initialize nf_conntrack_rnd in an "init()" function?
847 	 * Because there isn't enough entropy while the system is initializing,
848 	 * so we initialize it as late as possible.
849 	 */
850 	do {
851 		get_random_bytes(&rand, sizeof(rand));
852 	} while (!rand);
853 	cmpxchg(&nf_conntrack_hash_rnd, 0, rand);
854 }
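/* The cmpxchg() above means only the first caller's random value sticks;
 * __nf_conntrack_alloc() below re-reads nf_conntrack_hash_rnd afterwards and
 * recomputes the tuple hash with the now-fixed seed, so concurrent
 * initializers all end up agreeing on one seed.
 */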
855 
856 static struct nf_conn *
857 __nf_conntrack_alloc(struct net *net,
858 		     const struct nf_conntrack_zone *zone,
859 		     const struct nf_conntrack_tuple *orig,
860 		     const struct nf_conntrack_tuple *repl,
861 		     gfp_t gfp, u32 hash)
862 {
863 	struct nf_conn *ct;
864 
865 	if (unlikely(!nf_conntrack_hash_rnd)) {
866 		init_nf_conntrack_hash_rnd();
867 		/* recompute the hash as nf_conntrack_hash_rnd is initialized */
868 		hash = hash_conntrack_raw(orig);
869 	}
870 
871 	/* We don't want any race condition at early drop stage */
872 	atomic_inc(&net->ct.count);
873 
874 	if (nf_conntrack_max &&
875 	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
876 		if (!early_drop(net, hash)) {
877 			atomic_dec(&net->ct.count);
878 			net_warn_ratelimited("nf_conntrack: table full, dropping packet\n");
879 			return ERR_PTR(-ENOMEM);
880 		}
881 	}
882 
883 	/*
884 	 * Do not use kmem_cache_zalloc(), as this cache uses
885 	 * SLAB_DESTROY_BY_RCU.
886 	 */
887 	ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
888 	if (ct == NULL)
889 		goto out;
890 
891 	spin_lock_init(&ct->lock);
892 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
893 	ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode.pprev = NULL;
894 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
895 	/* save hash for reusing when confirming */
896 	*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
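	/* __nf_conntrack_confirm() reads this stashed value back out of the
	 * pprev pointer and turns it into a bucket with hash_bucket(), so the
	 * raw hash does not have to be recomputed at confirmation time. */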
897 	ct->status = 0;
898 	/* Don't set timer yet: wait for confirmation */
899 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
900 	write_pnet(&ct->ct_net, net);
901 	memset(&ct->__nfct_init_offset, 0,
902 	       offsetof(struct nf_conn, proto) -
903 	       offsetof(struct nf_conn, __nfct_init_offset));
904 
905 	if (zone && nf_ct_zone_add(ct, GFP_ATOMIC, zone) < 0)
906 		goto out_free;
907 
908 	/* Because we use RCU lookups, we set ct_general.use to zero before
909 	 * this is inserted in any list.
910 	 */
911 	atomic_set(&ct->ct_general.use, 0);
912 	return ct;
913 out_free:
914 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
915 out:
916 	atomic_dec(&net->ct.count);
917 	return ERR_PTR(-ENOMEM);
918 }
919 
920 struct nf_conn *nf_conntrack_alloc(struct net *net,
921 				   const struct nf_conntrack_zone *zone,
922 				   const struct nf_conntrack_tuple *orig,
923 				   const struct nf_conntrack_tuple *repl,
924 				   gfp_t gfp)
925 {
926 	return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
927 }
928 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
929 
930 void nf_conntrack_free(struct nf_conn *ct)
931 {
932 	struct net *net = nf_ct_net(ct);
933 
934 	/* A freed object has refcnt == 0, that's
935 	 * the golden rule for SLAB_DESTROY_BY_RCU
936 	 */
937 	NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
938 
939 	nf_ct_ext_destroy(ct);
940 	nf_ct_ext_free(ct);
941 	kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
942 	smp_mb__before_atomic();
943 	atomic_dec(&net->ct.count);
944 }
945 EXPORT_SYMBOL_GPL(nf_conntrack_free);
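/* Note on SLAB_DESTROY_BY_RCU (see the comment above): a freed conntrack's
 * memory may be recycled for a new entry right away, but the slab page itself
 * is not returned to the page allocator until an RCU grace period has passed.
 * That is why __nf_conntrack_find_get() must validate a candidate with
 * atomic_inc_not_zero() plus an nf_ct_key_equal() re-check before returning it.
 */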
946 
947 
948 /* Allocate a new conntrack: we return -ENOMEM if classification
949    failed due to stress.  Otherwise it really is unclassifiable. */
950 static struct nf_conntrack_tuple_hash *
951 init_conntrack(struct net *net, struct nf_conn *tmpl,
952 	       const struct nf_conntrack_tuple *tuple,
953 	       struct nf_conntrack_l3proto *l3proto,
954 	       struct nf_conntrack_l4proto *l4proto,
955 	       struct sk_buff *skb,
956 	       unsigned int dataoff, u32 hash)
957 {
958 	struct nf_conn *ct;
959 	struct nf_conn_help *help;
960 	struct nf_conntrack_tuple repl_tuple;
961 	struct nf_conntrack_ecache *ecache;
962 	struct nf_conntrack_expect *exp = NULL;
963 	const struct nf_conntrack_zone *zone;
964 	struct nf_conn_timeout *timeout_ext;
965 	struct nf_conntrack_zone tmp;
966 	unsigned int *timeouts;
967 
968 	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
969 		pr_debug("Can't invert tuple.\n");
970 		return NULL;
971 	}
972 
973 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
974 	ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
975 				  hash);
976 	if (IS_ERR(ct))
977 		return (struct nf_conntrack_tuple_hash *)ct;
978 
979 	if (tmpl && nfct_synproxy(tmpl)) {
980 		nfct_seqadj_ext_add(ct);
981 		nfct_synproxy_ext_add(ct);
982 	}
983 
984 	timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
985 	if (timeout_ext) {
986 		timeouts = nf_ct_timeout_data(timeout_ext);
987 		if (unlikely(!timeouts))
988 			timeouts = l4proto->get_timeouts(net);
989 	} else {
990 		timeouts = l4proto->get_timeouts(net);
991 	}
992 
993 	if (!l4proto->new(ct, skb, dataoff, timeouts)) {
994 		nf_conntrack_free(ct);
995 		pr_debug("init conntrack: can't track with proto module\n");
996 		return NULL;
997 	}
998 
999 	if (timeout_ext)
1000 		nf_ct_timeout_ext_add(ct, rcu_dereference(timeout_ext->timeout),
1001 				      GFP_ATOMIC);
1002 
1003 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
1004 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1005 	nf_ct_labels_ext_add(ct);
1006 
1007 	ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
1008 	nf_ct_ecache_ext_add(ct, ecache ? ecache->ctmask : 0,
1009 				 ecache ? ecache->expmask : 0,
1010 			     GFP_ATOMIC);
1011 
1012 	local_bh_disable();
1013 	if (net->ct.expect_count) {
1014 		spin_lock(&nf_conntrack_expect_lock);
1015 		exp = nf_ct_find_expectation(net, zone, tuple);
1016 		if (exp) {
1017 			pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
1018 				 ct, exp);
1019 			/* Welcome, Mr. Bond.  We've been expecting you... */
1020 			__set_bit(IPS_EXPECTED_BIT, &ct->status);
1021 			/* exp->master safe, refcnt bumped in nf_ct_find_expectation */
1022 			ct->master = exp->master;
1023 			if (exp->helper) {
1024 				help = nf_ct_helper_ext_add(ct, exp->helper,
1025 							    GFP_ATOMIC);
1026 				if (help)
1027 					rcu_assign_pointer(help->helper, exp->helper);
1028 			}
1029 
1030 #ifdef CONFIG_NF_CONNTRACK_MARK
1031 			ct->mark = exp->master->mark;
1032 #endif
1033 #ifdef CONFIG_NF_CONNTRACK_SECMARK
1034 			ct->secmark = exp->master->secmark;
1035 #endif
1036 			NF_CT_STAT_INC(net, expect_new);
1037 		}
1038 		spin_unlock(&nf_conntrack_expect_lock);
1039 	}
1040 	if (!exp) {
1041 		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
1042 		NF_CT_STAT_INC(net, new);
1043 	}
1044 
1045 	/* Now it is inserted into the unconfirmed list, bump refcount */
1046 	nf_conntrack_get(&ct->ct_general);
1047 	nf_ct_add_to_unconfirmed_list(ct);
1048 
1049 	local_bh_enable();
1050 
1051 	if (exp) {
1052 		if (exp->expectfn)
1053 			exp->expectfn(ct, exp);
1054 		nf_ct_expect_put(exp);
1055 	}
1056 
1057 	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
1058 }
1059 
1060 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
1061 static inline struct nf_conn *
1062 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
1063 		  struct sk_buff *skb,
1064 		  unsigned int dataoff,
1065 		  u_int16_t l3num,
1066 		  u_int8_t protonum,
1067 		  struct nf_conntrack_l3proto *l3proto,
1068 		  struct nf_conntrack_l4proto *l4proto,
1069 		  int *set_reply,
1070 		  enum ip_conntrack_info *ctinfo)
1071 {
1072 	const struct nf_conntrack_zone *zone;
1073 	struct nf_conntrack_tuple tuple;
1074 	struct nf_conntrack_tuple_hash *h;
1075 	struct nf_conntrack_zone tmp;
1076 	struct nf_conn *ct;
1077 	u32 hash;
1078 
1079 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
1080 			     dataoff, l3num, protonum, net, &tuple, l3proto,
1081 			     l4proto)) {
1082 		pr_debug("resolve_normal_ct: Can't get tuple\n");
1083 		return NULL;
1084 	}
1085 
1086 	/* look for tuple match */
1087 	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
1088 	hash = hash_conntrack_raw(&tuple);
1089 	h = __nf_conntrack_find_get(net, zone, &tuple, hash);
1090 	if (!h) {
1091 		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
1092 				   skb, dataoff, hash);
1093 		if (!h)
1094 			return NULL;
1095 		if (IS_ERR(h))
1096 			return (void *)h;
1097 	}
1098 	ct = nf_ct_tuplehash_to_ctrack(h);
1099 
1100 	/* It exists; we have (non-exclusive) reference. */
1101 	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
1102 		*ctinfo = IP_CT_ESTABLISHED_REPLY;
1103 		/* Please set reply bit if this packet OK */
1104 		*set_reply = 1;
1105 	} else {
1106 		/* Once we've had two way comms, always ESTABLISHED. */
1107 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
1108 			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
1109 			*ctinfo = IP_CT_ESTABLISHED;
1110 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
1111 			pr_debug("nf_conntrack_in: related packet for %p\n",
1112 				 ct);
1113 			*ctinfo = IP_CT_RELATED;
1114 		} else {
1115 			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
1116 			*ctinfo = IP_CT_NEW;
1117 		}
1118 		*set_reply = 0;
1119 	}
1120 	skb->nfct = &ct->ct_general;
1121 	skb->nfctinfo = *ctinfo;
1122 	return ct;
1123 }
1124 
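/* Main per-packet entry point, called from the netfilter hooks: resolve (or
 * create) the conntrack for this skb, run the L4 tracker's ->packet()
 * handler, and attach the result to skb->nfct / skb->nfctinfo.  The return
 * value is a netfilter verdict (NF_ACCEPT, NF_DROP, NF_REPEAT, ...).
 */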
1125 unsigned int
1126 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
1127 		struct sk_buff *skb)
1128 {
1129 	struct nf_conn *ct, *tmpl = NULL;
1130 	enum ip_conntrack_info ctinfo;
1131 	struct nf_conntrack_l3proto *l3proto;
1132 	struct nf_conntrack_l4proto *l4proto;
1133 	unsigned int *timeouts;
1134 	unsigned int dataoff;
1135 	u_int8_t protonum;
1136 	int set_reply = 0;
1137 	int ret;
1138 
1139 	if (skb->nfct) {
1140 		/* Previously seen (loopback or untracked)?  Ignore. */
1141 		tmpl = (struct nf_conn *)skb->nfct;
1142 		if (!nf_ct_is_template(tmpl)) {
1143 			NF_CT_STAT_INC_ATOMIC(net, ignore);
1144 			return NF_ACCEPT;
1145 		}
1146 		skb->nfct = NULL;
1147 	}
1148 
1149 	/* rcu_read_lock()ed by nf_hook_slow */
1150 	l3proto = __nf_ct_l3proto_find(pf);
1151 	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
1152 				   &dataoff, &protonum);
1153 	if (ret <= 0) {
1154 		pr_debug("not prepared to track yet or error occurred\n");
1155 		NF_CT_STAT_INC_ATOMIC(net, error);
1156 		NF_CT_STAT_INC_ATOMIC(net, invalid);
1157 		ret = -ret;
1158 		goto out;
1159 	}
1160 
1161 	l4proto = __nf_ct_l4proto_find(pf, protonum);
1162 
1163 	/* It may be a special packet, error, unclean... The
1164 	 * inverse of the return code tells the netfilter
1165 	 * core what to do with the packet. */
1166 	if (l4proto->error != NULL) {
1167 		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
1168 				     pf, hooknum);
1169 		if (ret <= 0) {
1170 			NF_CT_STAT_INC_ATOMIC(net, error);
1171 			NF_CT_STAT_INC_ATOMIC(net, invalid);
1172 			ret = -ret;
1173 			goto out;
1174 		}
1175 		/* ICMP[v6] protocol trackers may assign one conntrack. */
1176 		if (skb->nfct)
1177 			goto out;
1178 	}
1179 
1180 	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
1181 			       l3proto, l4proto, &set_reply, &ctinfo);
1182 	if (!ct) {
1183 		/* Not valid part of a connection */
1184 		NF_CT_STAT_INC_ATOMIC(net, invalid);
1185 		ret = NF_ACCEPT;
1186 		goto out;
1187 	}
1188 
1189 	if (IS_ERR(ct)) {
1190 		/* Too stressed to deal. */
1191 		NF_CT_STAT_INC_ATOMIC(net, drop);
1192 		ret = NF_DROP;
1193 		goto out;
1194 	}
1195 
1196 	NF_CT_ASSERT(skb->nfct);
1197 
1198 	/* Decide what timeout policy we want to apply to this flow. */
1199 	timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
1200 
1201 	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
1202 	if (ret <= 0) {
1203 		/* Invalid: inverse of the return code tells
1204 		 * the netfilter core what to do */
1205 		pr_debug("nf_conntrack_in: Can't track with proto module\n");
1206 		nf_conntrack_put(skb->nfct);
1207 		skb->nfct = NULL;
1208 		NF_CT_STAT_INC_ATOMIC(net, invalid);
1209 		if (ret == -NF_DROP)
1210 			NF_CT_STAT_INC_ATOMIC(net, drop);
1211 		ret = -ret;
1212 		goto out;
1213 	}
1214 
1215 	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
1216 		nf_conntrack_event_cache(IPCT_REPLY, ct);
1217 out:
1218 	if (tmpl) {
1219 		/* Special case: we have to repeat this hook, assign the
1220 		 * template again to this packet. We assume that this packet
1221 		 * has no conntrack assigned. This is used by nf_ct_tcp. */
1222 		if (ret == NF_REPEAT)
1223 			skb->nfct = (struct nf_conntrack *)tmpl;
1224 		else
1225 			nf_ct_put(tmpl);
1226 	}
1227 
1228 	return ret;
1229 }
1230 EXPORT_SYMBOL_GPL(nf_conntrack_in);
1231 
1232 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
1233 			  const struct nf_conntrack_tuple *orig)
1234 {
1235 	bool ret;
1236 
1237 	rcu_read_lock();
1238 	ret = nf_ct_invert_tuple(inverse, orig,
1239 				 __nf_ct_l3proto_find(orig->src.l3num),
1240 				 __nf_ct_l4proto_find(orig->src.l3num,
1241 						      orig->dst.protonum));
1242 	rcu_read_unlock();
1243 	return ret;
1244 }
1245 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
1246 
1247 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
1248    implicitly racy: see __nf_conntrack_confirm */
1249 void nf_conntrack_alter_reply(struct nf_conn *ct,
1250 			      const struct nf_conntrack_tuple *newreply)
1251 {
1252 	struct nf_conn_help *help = nfct_help(ct);
1253 
1254 	/* Should be unconfirmed, so not in hash table yet */
1255 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
1256 
1257 	pr_debug("Altering reply tuple of %p to ", ct);
1258 	nf_ct_dump_tuple(newreply);
1259 
1260 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
1261 	if (ct->master || (help && !hlist_empty(&help->expectations)))
1262 		return;
1263 
1264 	rcu_read_lock();
1265 	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
1266 	rcu_read_unlock();
1267 }
1268 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
1269 
1270 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
1271 void __nf_ct_refresh_acct(struct nf_conn *ct,
1272 			  enum ip_conntrack_info ctinfo,
1273 			  const struct sk_buff *skb,
1274 			  unsigned long extra_jiffies,
1275 			  int do_acct)
1276 {
1277 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
1278 	NF_CT_ASSERT(skb);
1279 
1280 	/* Only update if this is not a fixed timeout */
1281 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
1282 		goto acct;
1283 
1284 	/* If not in hash table, timer will not be active yet */
1285 	if (!nf_ct_is_confirmed(ct)) {
1286 		ct->timeout.expires = extra_jiffies;
1287 	} else {
1288 		unsigned long newtime = jiffies + extra_jiffies;
1289 
1290 		/* Only update the timeout if the new timeout is at least
1291 		   HZ jiffies from the old timeout. Need del_timer for race
1292 		   avoidance (may already be dying). */
1293 		if (newtime - ct->timeout.expires >= HZ)
1294 			mod_timer_pending(&ct->timeout, newtime);
1295 	}
1296 
1297 acct:
1298 	if (do_acct) {
1299 		struct nf_conn_acct *acct;
1300 
1301 		acct = nf_conn_acct_find(ct);
1302 		if (acct) {
1303 			struct nf_conn_counter *counter = acct->counter;
1304 
1305 			atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
1306 			atomic64_add(skb->len, &counter[CTINFO2DIR(ctinfo)].bytes);
1307 		}
1308 	}
1309 }
1310 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
1311 
1312 bool __nf_ct_kill_acct(struct nf_conn *ct,
1313 		       enum ip_conntrack_info ctinfo,
1314 		       const struct sk_buff *skb,
1315 		       int do_acct)
1316 {
1317 	if (do_acct) {
1318 		struct nf_conn_acct *acct;
1319 
1320 		acct = nf_conn_acct_find(ct);
1321 		if (acct) {
1322 			struct nf_conn_counter *counter = acct->counter;
1323 
1324 			atomic64_inc(&counter[CTINFO2DIR(ctinfo)].packets);
1325 			atomic64_add(skb->len - skb_network_offset(skb),
1326 				     &counter[CTINFO2DIR(ctinfo)].bytes);
1327 		}
1328 	}
1329 
1330 	if (del_timer(&ct->timeout)) {
1331 		ct->timeout.function((unsigned long)ct);
1332 		return true;
1333 	}
1334 	return false;
1335 }
1336 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
1337 
1338 #ifdef CONFIG_NF_CONNTRACK_ZONES
1339 static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1340 	.len	= sizeof(struct nf_conntrack_zone),
1341 	.align	= __alignof__(struct nf_conntrack_zone),
1342 	.id	= NF_CT_EXT_ZONE,
1343 };
1344 #endif
1345 
1346 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
1347 
1348 #include <linux/netfilter/nfnetlink.h>
1349 #include <linux/netfilter/nfnetlink_conntrack.h>
1350 #include <linux/mutex.h>
1351 
1352 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
1353  * in ip_conntrack_core, since we don't want the protocols to autoload
1354  * or depend on ctnetlink */
1355 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1356 			       const struct nf_conntrack_tuple *tuple)
1357 {
1358 	if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1359 	    nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1360 		goto nla_put_failure;
1361 	return 0;
1362 
1363 nla_put_failure:
1364 	return -1;
1365 }
1366 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
1367 
1368 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
1369 	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
1370 	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
1371 };
1372 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
1373 
1374 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
1375 			       struct nf_conntrack_tuple *t)
1376 {
1377 	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
1378 		return -EINVAL;
1379 
1380 	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
1381 	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
1382 
1383 	return 0;
1384 }
1385 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
1386 
1387 int nf_ct_port_nlattr_tuple_size(void)
1388 {
1389 	return nla_policy_len(nf_ct_port_nla_policy, CTA_PROTO_MAX + 1);
1390 }
1391 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
1392 #endif
1393 
1394 /* Used by ipt_REJECT and ip6t_REJECT. */
1395 static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
1396 {
1397 	struct nf_conn *ct;
1398 	enum ip_conntrack_info ctinfo;
1399 
1400 	/* This ICMP is in reverse direction to the packet which caused it */
1401 	ct = nf_ct_get(skb, &ctinfo);
1402 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1403 		ctinfo = IP_CT_RELATED_REPLY;
1404 	else
1405 		ctinfo = IP_CT_RELATED;
1406 
1407 	/* Attach to new skbuff, and increment count */
1408 	nskb->nfct = &ct->ct_general;
1409 	nskb->nfctinfo = ctinfo;
1410 	nf_conntrack_get(nskb->nfct);
1411 }
1412 
1413 /* Bring out ya dead! */
1414 static struct nf_conn *
1415 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
1416 		void *data, unsigned int *bucket)
1417 {
1418 	struct nf_conntrack_tuple_hash *h;
1419 	struct nf_conn *ct;
1420 	struct hlist_nulls_node *n;
1421 	int cpu;
1422 	spinlock_t *lockp;
1423 
1424 	for (; *bucket < net->ct.htable_size; (*bucket)++) {
1425 		lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1426 		local_bh_disable();
1427 		spin_lock(lockp);
1428 		if (*bucket < net->ct.htable_size) {
1429 			hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1430 				if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
1431 					continue;
1432 				ct = nf_ct_tuplehash_to_ctrack(h);
1433 				if (iter(ct, data))
1434 					goto found;
1435 			}
1436 		}
1437 		spin_unlock(lockp);
1438 		local_bh_enable();
1439 	}
1440 
1441 	for_each_possible_cpu(cpu) {
1442 		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1443 
1444 		spin_lock_bh(&pcpu->lock);
1445 		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
1446 			ct = nf_ct_tuplehash_to_ctrack(h);
1447 			if (iter(ct, data))
1448 				set_bit(IPS_DYING_BIT, &ct->status);
1449 		}
1450 		spin_unlock_bh(&pcpu->lock);
1451 	}
1452 	return NULL;
1453 found:
1454 	atomic_inc(&ct->ct_general.use);
1455 	spin_unlock(lockp);
1456 	local_bh_enable();
1457 	return ct;
1458 }
1459 
1460 void nf_ct_iterate_cleanup(struct net *net,
1461 			   int (*iter)(struct nf_conn *i, void *data),
1462 			   void *data, u32 portid, int report)
1463 {
1464 	struct nf_conn *ct;
1465 	unsigned int bucket = 0;
1466 
1467 	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1468 		/* Time to push up daisies... */
1469 		if (del_timer(&ct->timeout))
1470 			nf_ct_delete(ct, portid, report);
1471 
1472 		/* ... else the timer will get him soon. */
1473 
1474 		nf_ct_put(ct);
1475 	}
1476 }
1477 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
1478 
1479 static int kill_all(struct nf_conn *i, void *data)
1480 {
1481 	return 1;
1482 }
1483 
1484 void nf_ct_free_hashtable(void *hash, unsigned int size)
1485 {
1486 	if (is_vmalloc_addr(hash))
1487 		vfree(hash);
1488 	else
1489 		free_pages((unsigned long)hash,
1490 			   get_order(sizeof(struct hlist_head) * size));
1491 }
1492 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1493 
1494 static int untrack_refs(void)
1495 {
1496 	int cnt = 0, cpu;
1497 
1498 	for_each_possible_cpu(cpu) {
1499 		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1500 
1501 		cnt += atomic_read(&ct->ct_general.use) - 1;
1502 	}
1503 	return cnt;
1504 }
1505 
1506 void nf_conntrack_cleanup_start(void)
1507 {
1508 	RCU_INIT_POINTER(ip_ct_attach, NULL);
1509 }
1510 
1511 void nf_conntrack_cleanup_end(void)
1512 {
1513 	RCU_INIT_POINTER(nf_ct_destroy, NULL);
1514 	while (untrack_refs() > 0)
1515 		schedule();
1516 
1517 #ifdef CONFIG_NF_CONNTRACK_ZONES
1518 	nf_ct_extend_unregister(&nf_ct_zone_extend);
1519 #endif
1520 	nf_conntrack_proto_fini();
1521 	nf_conntrack_seqadj_fini();
1522 	nf_conntrack_labels_fini();
1523 	nf_conntrack_helper_fini();
1524 	nf_conntrack_timeout_fini();
1525 	nf_conntrack_ecache_fini();
1526 	nf_conntrack_tstamp_fini();
1527 	nf_conntrack_acct_fini();
1528 	nf_conntrack_expect_fini();
1529 }
1530 
1531 /*
1532  * Mishearing the voices in his head, our hero wonders how he's
1533  * supposed to kill the mall.
1534  */
1535 void nf_conntrack_cleanup_net(struct net *net)
1536 {
1537 	LIST_HEAD(single);
1538 
1539 	list_add(&net->exit_list, &single);
1540 	nf_conntrack_cleanup_net_list(&single);
1541 }
1542 
1543 void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1544 {
1545 	int busy;
1546 	struct net *net;
1547 
1548 	/*
1549 	 * This makes sure all current packets have passed through
1550 	 *  the netfilter framework.  Roll on, two-stage module
1551 	 *  delete...
1552 	 */
1553 	synchronize_net();
1554 i_see_dead_people:
1555 	busy = 0;
1556 	list_for_each_entry(net, net_exit_list, exit_list) {
1557 		nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
1558 		if (atomic_read(&net->ct.count) != 0)
1559 			busy = 1;
1560 	}
1561 	if (busy) {
1562 		schedule();
1563 		goto i_see_dead_people;
1564 	}
1565 
1566 	list_for_each_entry(net, net_exit_list, exit_list) {
1567 		nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1568 		nf_conntrack_proto_pernet_fini(net);
1569 		nf_conntrack_helper_pernet_fini(net);
1570 		nf_conntrack_ecache_pernet_fini(net);
1571 		nf_conntrack_tstamp_pernet_fini(net);
1572 		nf_conntrack_acct_pernet_fini(net);
1573 		nf_conntrack_expect_pernet_fini(net);
1574 		kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1575 		kfree(net->ct.slabname);
1576 		free_percpu(net->ct.stat);
1577 		free_percpu(net->ct.pcpu_lists);
1578 	}
1579 }
1580 
1581 void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1582 {
1583 	struct hlist_nulls_head *hash;
1584 	unsigned int nr_slots, i;
1585 	size_t sz;
1586 
1587 	BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
1588 	nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
1589 	sz = nr_slots * sizeof(struct hlist_nulls_head);
1590 	hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1591 					get_order(sz));
1592 	if (!hash)
1593 		hash = vzalloc(sz);
1594 
1595 	if (hash && nulls)
1596 		for (i = 0; i < nr_slots; i++)
1597 			INIT_HLIST_NULLS_HEAD(&hash[i], i);
1598 
1599 	return hash;
1600 }
1601 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
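/* Each bucket's nulls marker is initialized to its own bucket index above.
 * Lockless walkers such as ____nf_conntrack_find() and
 * nf_conntrack_tuple_taken() compare get_nulls_value() against the bucket
 * they started from and restart the walk if an entry was moved to another
 * chain under them (e.g. by a resize).
 */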
1602 
1603 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1604 {
1605 	int i, bucket, rc;
1606 	unsigned int hashsize, old_size;
1607 	struct hlist_nulls_head *hash, *old_hash;
1608 	struct nf_conntrack_tuple_hash *h;
1609 	struct nf_conn *ct;
1610 
1611 	if (current->nsproxy->net_ns != &init_net)
1612 		return -EOPNOTSUPP;
1613 
1614 	/* On boot, we can set this without any fancy locking. */
1615 	if (!nf_conntrack_htable_size)
1616 		return param_set_uint(val, kp);
1617 
1618 	rc = kstrtouint(val, 0, &hashsize);
1619 	if (rc)
1620 		return rc;
1621 	if (!hashsize)
1622 		return -EINVAL;
1623 
1624 	hash = nf_ct_alloc_hashtable(&hashsize, 1);
1625 	if (!hash)
1626 		return -ENOMEM;
1627 
1628 	local_bh_disable();
1629 	nf_conntrack_all_lock();
1630 	write_seqcount_begin(&init_net.ct.generation);
1631 
1632 	/* Lookups in the old hash might happen in parallel, which means we
1633 	 * might get false negatives during connection lookup. New connections
1634 	 * created because of a false negative won't make it into the hash
1635 	 * though since that required taking the locks.
1636 	 */
1637 
1638 	for (i = 0; i < init_net.ct.htable_size; i++) {
1639 		while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
1640 			h = hlist_nulls_entry(init_net.ct.hash[i].first,
1641 					struct nf_conntrack_tuple_hash, hnnode);
1642 			ct = nf_ct_tuplehash_to_ctrack(h);
1643 			hlist_nulls_del_rcu(&h->hnnode);
1644 			bucket = __hash_conntrack(&h->tuple, hashsize);
1645 			hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
1646 		}
1647 	}
1648 	old_size = init_net.ct.htable_size;
1649 	old_hash = init_net.ct.hash;
1650 
1651 	init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
1652 	init_net.ct.hash = hash;
1653 
1654 	write_seqcount_end(&init_net.ct.generation);
1655 	nf_conntrack_all_unlock();
1656 	local_bh_enable();
1657 
1658 	nf_ct_free_hashtable(old_hash, old_size);
1659 	return 0;
1660 }
1661 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1662 
1663 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1664 		  &nf_conntrack_htable_size, 0600);
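/* Editor's note: via the module_param_call() above, the hash size can be
 * changed at runtime from the initial network namespace, e.g.:
 *
 *	echo 65536 > /sys/module/nf_conntrack/parameters/hashsize
 *
 * which lands in nf_conntrack_set_hashsize() and triggers the rehash loop
 * above; reads of the parameter go through param_get_uint().
 */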
1665 
1666 void nf_ct_untracked_status_or(unsigned long bits)
1667 {
1668 	int cpu;
1669 
1670 	for_each_possible_cpu(cpu)
1671 		per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1672 }
1673 EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1674 
1675 int nf_conntrack_init_start(void)
1676 {
1677 	int max_factor = 8;
1678 	int i, ret, cpu;
1679 
1680 	for (i = 0; i < CONNTRACK_LOCKS; i++)
1681 		spin_lock_init(&nf_conntrack_locks[i]);
1682 
1683 	if (!nf_conntrack_htable_size) {
1684 		/* Idea from tcp.c: use 1/16384 of memory.
1685 		 * On i386: 32MB machine has 512 buckets.
1686 		 * >= 1GB machines have 16384 buckets.
1687 		 * >= 4GB machines have 65536 buckets.
1688 		 */
1689 		nf_conntrack_htable_size
1690 			= (((totalram_pages << PAGE_SHIFT) / 16384)
1691 			   / sizeof(struct hlist_head));
1692 		if (totalram_pages > (4 * (1024 * 1024 * 1024 / PAGE_SIZE)))
1693 			nf_conntrack_htable_size = 65536;
1694 		else if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
1695 			nf_conntrack_htable_size = 16384;
1696 		if (nf_conntrack_htable_size < 32)
1697 			nf_conntrack_htable_size = 32;
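		/* Editor's worked example (assuming 4 KiB pages and an 8-byte
		 * struct hlist_head, i.e. a 64-bit build): a 512 MiB machine
		 * has totalram_pages = 131072, so
		 * (131072 << 12) / 16384 / 8 = 4096 buckets; the checks above
		 * then fix machines over 1 GiB at 16384 buckets and over
		 * 4 GiB at 65536 buckets, matching the comment above. */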
1698 
1699 		/* Use a maximum factor of four by default to get the same max as
1700 		 * with the old struct list_heads. When a table size is given
1701 		 * we use the old value of 8 to avoid reducing the maximum
1702 		 * number of entries. */
1703 		max_factor = 4;
1704 	}
1705 	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1706 
1707 	printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
1708 	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1709 	       nf_conntrack_max);
1710 
1711 	ret = nf_conntrack_expect_init();
1712 	if (ret < 0)
1713 		goto err_expect;
1714 
1715 	ret = nf_conntrack_acct_init();
1716 	if (ret < 0)
1717 		goto err_acct;
1718 
1719 	ret = nf_conntrack_tstamp_init();
1720 	if (ret < 0)
1721 		goto err_tstamp;
1722 
1723 	ret = nf_conntrack_ecache_init();
1724 	if (ret < 0)
1725 		goto err_ecache;
1726 
1727 	ret = nf_conntrack_timeout_init();
1728 	if (ret < 0)
1729 		goto err_timeout;
1730 
1731 	ret = nf_conntrack_helper_init();
1732 	if (ret < 0)
1733 		goto err_helper;
1734 
1735 	ret = nf_conntrack_labels_init();
1736 	if (ret < 0)
1737 		goto err_labels;
1738 
1739 	ret = nf_conntrack_seqadj_init();
1740 	if (ret < 0)
1741 		goto err_seqadj;
1742 
1743 #ifdef CONFIG_NF_CONNTRACK_ZONES
1744 	ret = nf_ct_extend_register(&nf_ct_zone_extend);
1745 	if (ret < 0)
1746 		goto err_extend;
1747 #endif
1748 	ret = nf_conntrack_proto_init();
1749 	if (ret < 0)
1750 		goto err_proto;
1751 
1752 	/* Set up fake conntrack: to never be deleted, not in any hashes */
1753 	for_each_possible_cpu(cpu) {
1754 		struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1755 		write_pnet(&ct->ct_net, &init_net);
1756 		atomic_set(&ct->ct_general.use, 1);
1757 	}
1758 	/*  - and make it look like a confirmed connection */
1759 	nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
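	/* Editor's note: with IPS_CONFIRMED and IPS_UNTRACKED set on every
	 * per-cpu template, nf_ct_is_confirmed() and nf_ct_is_untracked()
	 * succeed for untracked packets even though these template entries
	 * are never placed in any hash table and never deleted. */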
1760 	return 0;
1761 
1762 err_proto:
1763 #ifdef CONFIG_NF_CONNTRACK_ZONES
1764 	nf_ct_extend_unregister(&nf_ct_zone_extend);
1765 err_extend:
1766 #endif
1767 	nf_conntrack_seqadj_fini();
1768 err_seqadj:
1769 	nf_conntrack_labels_fini();
1770 err_labels:
1771 	nf_conntrack_helper_fini();
1772 err_helper:
1773 	nf_conntrack_timeout_fini();
1774 err_timeout:
1775 	nf_conntrack_ecache_fini();
1776 err_ecache:
1777 	nf_conntrack_tstamp_fini();
1778 err_tstamp:
1779 	nf_conntrack_acct_fini();
1780 err_acct:
1781 	nf_conntrack_expect_fini();
1782 err_expect:
1783 	return ret;
1784 }
1785 
1786 void nf_conntrack_init_end(void)
1787 {
1788 	/* For use by REJECT target */
1789 	RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
1790 	RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
1791 }
1792 
1793 /*
1794  * We need to use special "null" values, not used in the hash table
1795  */
1796 #define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
1797 #define DYING_NULLS_VAL		((1<<30)+1)
1798 #define TEMPLATE_NULLS_VAL	((1<<30)+2)
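/* Editor's note: hash buckets get their slot index as the nulls marker (see
 * nf_ct_alloc_hashtable() above), and real tables stay far below 2^30
 * buckets, so the three values above can never collide with a bucket's
 * marker; a lockless reader that finishes a chain on one of these values can
 * tell it drifted off a hash bucket onto a per-cpu unconfirmed/dying list and
 * must restart the lookup.
 */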
1799 
1800 int nf_conntrack_init_net(struct net *net)
1801 {
1802 	static atomic64_t unique_id;
1803 	int ret = -ENOMEM;
1804 	int cpu;
1805 
1806 	atomic_set(&net->ct.count, 0);
1807 	seqcount_init(&net->ct.generation);
1808 
1809 	net->ct.pcpu_lists = alloc_percpu(struct ct_pcpu);
1810 	if (!net->ct.pcpu_lists)
1811 		goto err_stat;
1812 
1813 	for_each_possible_cpu(cpu) {
1814 		struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1815 
1816 		spin_lock_init(&pcpu->lock);
1817 		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1818 		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1819 	}
1820 
1821 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1822 	if (!net->ct.stat)
1823 		goto err_pcpu_lists;
1824 
1825 	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
1826 				(u64)atomic64_inc_return(&unique_id));
1827 	if (!net->ct.slabname)
1828 		goto err_slabname;
1829 
1830 	net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
1831 							sizeof(struct nf_conn), 0,
1832 							SLAB_DESTROY_BY_RCU, NULL);
1833 	if (!net->ct.nf_conntrack_cachep) {
1834 		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1835 		goto err_cache;
1836 	}
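	/* Editor's note: SLAB_DESTROY_BY_RCU means a freed nf_conn's memory
	 * may be reused for a new entry right away, but the underlying slab
	 * page is only returned to the allocator after an RCU grace period.
	 * Lockless readers may therefore briefly see a recycled object and
	 * must re-validate the tuple (and take a reference) before using it. */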
1837 
1838 	net->ct.htable_size = nf_conntrack_htable_size;
1839 	net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1);
1840 	if (!net->ct.hash) {
1841 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1842 		goto err_hash;
1843 	}
1844 	ret = nf_conntrack_expect_pernet_init(net);
1845 	if (ret < 0)
1846 		goto err_expect;
1847 	ret = nf_conntrack_acct_pernet_init(net);
1848 	if (ret < 0)
1849 		goto err_acct;
1850 	ret = nf_conntrack_tstamp_pernet_init(net);
1851 	if (ret < 0)
1852 		goto err_tstamp;
1853 	ret = nf_conntrack_ecache_pernet_init(net);
1854 	if (ret < 0)
1855 		goto err_ecache;
1856 	ret = nf_conntrack_helper_pernet_init(net);
1857 	if (ret < 0)
1858 		goto err_helper;
1859 	ret = nf_conntrack_proto_pernet_init(net);
1860 	if (ret < 0)
1861 		goto err_proto;
1862 	return 0;
1863 
1864 err_proto:
1865 	nf_conntrack_helper_pernet_fini(net);
1866 err_helper:
1867 	nf_conntrack_ecache_pernet_fini(net);
1868 err_ecache:
1869 	nf_conntrack_tstamp_pernet_fini(net);
1870 err_tstamp:
1871 	nf_conntrack_acct_pernet_fini(net);
1872 err_acct:
1873 	nf_conntrack_expect_pernet_fini(net);
1874 err_expect:
1875 	nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1876 err_hash:
1877 	kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1878 err_cache:
1879 	kfree(net->ct.slabname);
1880 err_slabname:
1881 	free_percpu(net->ct.stat);
1882 err_pcpu_lists:
1883 	free_percpu(net->ct.pcpu_lists);
1884 err_stat:
1885 	return ret;
1886 }
1887