1 /* Connection state tracking for netfilter.  This is separated from,
2    but required by, the NAT layer; it can also be used by an iptables
3    extension. */
4 
5 /* (C) 1999-2001 Paul `Rusty' Russell
6  * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
7  * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #include <linux/types.h>
15 #include <linux/netfilter.h>
16 #include <linux/module.h>
17 #include <linux/skbuff.h>
18 #include <linux/proc_fs.h>
19 #include <linux/vmalloc.h>
20 #include <linux/stddef.h>
21 #include <linux/slab.h>
22 #include <linux/random.h>
23 #include <linux/jhash.h>
24 #include <linux/err.h>
25 #include <linux/percpu.h>
26 #include <linux/moduleparam.h>
27 #include <linux/notifier.h>
28 #include <linux/kernel.h>
29 #include <linux/netdevice.h>
30 #include <linux/socket.h>
31 #include <linux/mm.h>
32 
33 #include <net/netfilter/nf_conntrack.h>
34 #include <net/netfilter/nf_conntrack_l3proto.h>
35 #include <net/netfilter/nf_conntrack_l4proto.h>
36 #include <net/netfilter/nf_conntrack_expect.h>
37 #include <net/netfilter/nf_conntrack_helper.h>
38 #include <net/netfilter/nf_conntrack_core.h>
39 #include <net/netfilter/nf_conntrack_extend.h>
40 #include <net/netfilter/nf_conntrack_acct.h>
41 #include <net/netfilter/nf_nat.h>
42 #include <net/netfilter/nf_nat_core.h>
43 
44 #define NF_CONNTRACK_VERSION	"0.5.0"
45 
46 int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
47 				      enum nf_nat_manip_type manip,
48 				      struct nlattr *attr) __read_mostly;
49 EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
50 
51 DEFINE_SPINLOCK(nf_conntrack_lock);
52 EXPORT_SYMBOL_GPL(nf_conntrack_lock);
53 
54 unsigned int nf_conntrack_htable_size __read_mostly;
55 EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
56 
57 int nf_conntrack_max __read_mostly;
58 EXPORT_SYMBOL_GPL(nf_conntrack_max);
59 
60 struct nf_conn nf_conntrack_untracked __read_mostly;
61 EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
62 
63 static struct kmem_cache *nf_conntrack_cachep __read_mostly;
64 
65 static int nf_conntrack_hash_rnd_initted;
66 static unsigned int nf_conntrack_hash_rnd;
67 
68 static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
69 				  unsigned int size, unsigned int rnd)
70 {
71 	unsigned int n;
72 	u_int32_t h;
73 
74 	/* The direction must be ignored, so we hash everything up to the
75 	 * destination ports (which is a multiple of 4) and treat the last
76 	 * three bytes manually.
77 	 */
78 	n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
79 	h = jhash2((u32 *)tuple, n,
80 		   rnd ^ (((__force __u16)tuple->dst.u.all << 16) |
81 			  tuple->dst.protonum));
82 
83 	return ((u64)h * size) >> 32;
84 }
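/* Note on the final scaling step above: rather than "h % size", the 32-bit
 * jhash value is mapped into [0, size) with ((u64)h * size) >> 32, i.e. a
 * multiply-and-shift.  This avoids a division and spreads the hash uniformly
 * over the table regardless of the table size. */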
85 
86 static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
87 {
88 	return __hash_conntrack(tuple, nf_conntrack_htable_size,
89 				nf_conntrack_hash_rnd);
90 }
91 
92 bool
93 nf_ct_get_tuple(const struct sk_buff *skb,
94 		unsigned int nhoff,
95 		unsigned int dataoff,
96 		u_int16_t l3num,
97 		u_int8_t protonum,
98 		struct nf_conntrack_tuple *tuple,
99 		const struct nf_conntrack_l3proto *l3proto,
100 		const struct nf_conntrack_l4proto *l4proto)
101 {
102 	memset(tuple, 0, sizeof(*tuple));
103 
104 	tuple->src.l3num = l3num;
105 	if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
106 		return false;
107 
108 	tuple->dst.protonum = protonum;
109 	tuple->dst.dir = IP_CT_DIR_ORIGINAL;
110 
111 	return l4proto->pkt_to_tuple(skb, dataoff, tuple);
112 }
113 EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
114 
115 bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
116 		       u_int16_t l3num, struct nf_conntrack_tuple *tuple)
117 {
118 	struct nf_conntrack_l3proto *l3proto;
119 	struct nf_conntrack_l4proto *l4proto;
120 	unsigned int protoff;
121 	u_int8_t protonum;
122 	int ret;
123 
124 	rcu_read_lock();
125 
126 	l3proto = __nf_ct_l3proto_find(l3num);
127 	ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
128 	if (ret != NF_ACCEPT) {
129 		rcu_read_unlock();
130 		return false;
131 	}
132 
133 	l4proto = __nf_ct_l4proto_find(l3num, protonum);
134 
135 	ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, tuple,
136 			      l3proto, l4proto);
137 
138 	rcu_read_unlock();
139 	return ret;
140 }
141 EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
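/* The difference between the two helpers above: nf_ct_get_tuple() expects the
 * caller to have already looked up the l3/l4 protocol handlers and the l4
 * header offset, while nf_ct_get_tuplepr() performs those lookups itself
 * (it only needs the skb, the network header offset and the l3 protocol
 * number) and must therefore take rcu_read_lock() internally. */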
142 
143 bool
144 nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
145 		   const struct nf_conntrack_tuple *orig,
146 		   const struct nf_conntrack_l3proto *l3proto,
147 		   const struct nf_conntrack_l4proto *l4proto)
148 {
149 	memset(inverse, 0, sizeof(*inverse));
150 
151 	inverse->src.l3num = orig->src.l3num;
152 	if (l3proto->invert_tuple(inverse, orig) == 0)
153 		return false;
154 
155 	inverse->dst.dir = !orig->dst.dir;
156 
157 	inverse->dst.protonum = orig->dst.protonum;
158 	return l4proto->invert_tuple(inverse, orig);
159 }
160 EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
161 
162 static void
163 clean_from_lists(struct nf_conn *ct)
164 {
165 	pr_debug("clean_from_lists(%p)\n", ct);
166 	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
167 	hlist_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode);
168 
169 	/* Destroy all pending expectations */
170 	nf_ct_remove_expectations(ct);
171 }
172 
173 static void
174 destroy_conntrack(struct nf_conntrack *nfct)
175 {
176 	struct nf_conn *ct = (struct nf_conn *)nfct;
177 	struct net *net = nf_ct_net(ct);
178 	struct nf_conntrack_l4proto *l4proto;
179 
180 	pr_debug("destroy_conntrack(%p)\n", ct);
181 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
182 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
183 
184 	if (!test_bit(IPS_DYING_BIT, &ct->status))
185 		nf_conntrack_event(IPCT_DESTROY, ct);
186 	set_bit(IPS_DYING_BIT, &ct->status);
187 
188 	/* To make sure we don't get any weird locking issues here:
189 	 * destroy_conntrack() MUST NOT be called with a write lock
190 	 * to nf_conntrack_lock!!! -HW */
191 	rcu_read_lock();
192 	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
193 	if (l4proto && l4proto->destroy)
194 		l4proto->destroy(ct);
195 
196 	rcu_read_unlock();
197 
198 	spin_lock_bh(&nf_conntrack_lock);
199 	/* Expectations will have been removed in clean_from_lists,
200 	 * except TFTP can create an expectation on the first packet,
201 	 * before connection is in the list, so we need to clean here,
202 	 * too. */
203 	nf_ct_remove_expectations(ct);
204 
205 	/* We overload first tuple to link into unconfirmed list. */
206 	if (!nf_ct_is_confirmed(ct)) {
207 		BUG_ON(hlist_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode));
208 		hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
209 	}
210 
211 	NF_CT_STAT_INC(net, delete);
212 	spin_unlock_bh(&nf_conntrack_lock);
213 
214 	if (ct->master)
215 		nf_ct_put(ct->master);
216 
217 	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
218 	nf_conntrack_free(ct);
219 }
220 
221 static void death_by_timeout(unsigned long ul_conntrack)
222 {
223 	struct nf_conn *ct = (void *)ul_conntrack;
224 	struct net *net = nf_ct_net(ct);
225 	struct nf_conn_help *help = nfct_help(ct);
226 	struct nf_conntrack_helper *helper;
227 
228 	if (help) {
229 		rcu_read_lock();
230 		helper = rcu_dereference(help->helper);
231 		if (helper && helper->destroy)
232 			helper->destroy(ct);
233 		rcu_read_unlock();
234 	}
235 
236 	spin_lock_bh(&nf_conntrack_lock);
237 	/* Inside lock so preempt is disabled on module removal path.
238 	 * Otherwise we can get spurious warnings. */
239 	NF_CT_STAT_INC(net, delete_list);
240 	clean_from_lists(ct);
241 	spin_unlock_bh(&nf_conntrack_lock);
242 	nf_ct_put(ct);
243 }
244 
245 struct nf_conntrack_tuple_hash *
246 __nf_conntrack_find(struct net *net, const struct nf_conntrack_tuple *tuple)
247 {
248 	struct nf_conntrack_tuple_hash *h;
249 	struct hlist_node *n;
250 	unsigned int hash = hash_conntrack(tuple);
251 
252 	/* Disable BHs the entire time since we normally need to disable them
253 	 * at least once for the stats anyway.
254 	 */
255 	local_bh_disable();
256 	hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
257 		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
258 			NF_CT_STAT_INC(net, found);
259 			local_bh_enable();
260 			return h;
261 		}
262 		NF_CT_STAT_INC(net, searched);
263 	}
264 	local_bh_enable();
265 
266 	return NULL;
267 }
268 EXPORT_SYMBOL_GPL(__nf_conntrack_find);
269 
270 /* Find a connection corresponding to a tuple. */
271 struct nf_conntrack_tuple_hash *
272 nf_conntrack_find_get(struct net *net, const struct nf_conntrack_tuple *tuple)
273 {
274 	struct nf_conntrack_tuple_hash *h;
275 	struct nf_conn *ct;
276 
277 	rcu_read_lock();
278 	h = __nf_conntrack_find(net, tuple);
279 	if (h) {
280 		ct = nf_ct_tuplehash_to_ctrack(h);
281 		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
282 			h = NULL;
283 	}
284 	rcu_read_unlock();
285 
286 	return h;
287 }
288 EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
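/* Lookup pattern used above: __nf_conntrack_find() walks the RCU-protected
 * hash chain without taking a reference, so nf_conntrack_find_get() must
 * re-validate the entry with atomic_inc_not_zero() before returning it.
 * An entry whose refcount has already dropped to zero may still be visible
 * to RCU readers until the grace period ends (see nf_conntrack_free_rcu())
 * and must not be handed out. */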
289 
290 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
291 				       unsigned int hash,
292 				       unsigned int repl_hash)
293 {
294 	struct net *net = nf_ct_net(ct);
295 
296 	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
297 			   &net->ct.hash[hash]);
298 	hlist_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnode,
299 			   &net->ct.hash[repl_hash]);
300 }
301 
302 void nf_conntrack_hash_insert(struct nf_conn *ct)
303 {
304 	unsigned int hash, repl_hash;
305 
306 	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
307 	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
308 
309 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
310 }
311 EXPORT_SYMBOL_GPL(nf_conntrack_hash_insert);
312 
313 /* Confirm a connection given skb; places it in hash table */
314 int
315 __nf_conntrack_confirm(struct sk_buff *skb)
316 {
317 	unsigned int hash, repl_hash;
318 	struct nf_conntrack_tuple_hash *h;
319 	struct nf_conn *ct;
320 	struct nf_conn_help *help;
321 	struct hlist_node *n;
322 	enum ip_conntrack_info ctinfo;
323 	struct net *net;
324 
325 	ct = nf_ct_get(skb, &ctinfo);
326 	net = nf_ct_net(ct);
327 
328 	/* ipt_REJECT uses nf_conntrack_attach to attach related
329 	   ICMP/TCP RST packets in other direction.  Actual packet
330 	   which created connection will be IP_CT_NEW or for an
331 	   expected connection, IP_CT_RELATED. */
332 	if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
333 		return NF_ACCEPT;
334 
335 	hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
336 	repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
337 
338 	/* We're not in hash table, and we refuse to set up related
339 	   connections for unconfirmed conns.  But packet copies and
340 	   REJECT will give spurious warnings here. */
341 	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
342 
343 	/* No external references means no one else could have
344 	   confirmed us. */
345 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
346 	pr_debug("Confirming conntrack %p\n", ct);
347 
348 	spin_lock_bh(&nf_conntrack_lock);
349 
350 	/* See if there's one in the list already, including reverse:
351 	   NAT could have grabbed it without realizing, since we're
352    not in the hash.  If there is, we lost the race. */
353 	hlist_for_each_entry(h, n, &net->ct.hash[hash], hnode)
354 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
355 				      &h->tuple))
356 			goto out;
357 	hlist_for_each_entry(h, n, &net->ct.hash[repl_hash], hnode)
358 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
359 				      &h->tuple))
360 			goto out;
361 
362 	/* Remove from unconfirmed list */
363 	hlist_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode);
364 
365 	__nf_conntrack_hash_insert(ct, hash, repl_hash);
366 	/* Timer relative to confirmation time, not original
367 	   setting time, otherwise we'd get timer wrap in
368 	   weird delay cases. */
369 	ct->timeout.expires += jiffies;
370 	add_timer(&ct->timeout);
371 	atomic_inc(&ct->ct_general.use);
372 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
373 	NF_CT_STAT_INC(net, insert);
374 	spin_unlock_bh(&nf_conntrack_lock);
375 	help = nfct_help(ct);
376 	if (help && help->helper)
377 		nf_conntrack_event_cache(IPCT_HELPER, ct);
378 #ifdef CONFIG_NF_NAT_NEEDED
379 	if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
380 	    test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
381 		nf_conntrack_event_cache(IPCT_NATINFO, ct);
382 #endif
383 	nf_conntrack_event_cache(master_ct(ct) ?
384 				 IPCT_RELATED : IPCT_NEW, ct);
385 	return NF_ACCEPT;
386 
387 out:
388 	NF_CT_STAT_INC(net, insert_failed);
389 	spin_unlock_bh(&nf_conntrack_lock);
390 	return NF_DROP;
391 }
392 EXPORT_SYMBOL_GPL(__nf_conntrack_confirm);
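/* Confirmation happens late in the packet's traversal (the confirm hook is
 * typically one of the last netfilter hooks, e.g. at POSTROUTING/LOCAL_IN),
 * so a conntrack entry only enters the hash table once the first packet has
 * actually made it through the ruleset; until then it lives on the
 * unconfirmed list set up in init_conntrack(). */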
393 
394 /* Returns true if a connection corresponds to the tuple (required
395    for NAT). */
396 int
397 nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
398 			 const struct nf_conn *ignored_conntrack)
399 {
400 	struct net *net = nf_ct_net(ignored_conntrack);
401 	struct nf_conntrack_tuple_hash *h;
402 	struct hlist_node *n;
403 	unsigned int hash = hash_conntrack(tuple);
404 
405 	/* Disable BHs the entire time since we need to disable them at
406 	 * least once for the stats anyway.
407 	 */
408 	rcu_read_lock_bh();
409 	hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnode) {
410 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
411 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
412 			NF_CT_STAT_INC(net, found);
413 			rcu_read_unlock_bh();
414 			return 1;
415 		}
416 		NF_CT_STAT_INC(net, searched);
417 	}
418 	rcu_read_unlock_bh();
419 
420 	return 0;
421 }
422 EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
423 
424 #define NF_CT_EVICTION_RANGE	8
425 
426 /* There's a small race here where we may free a just-assured
427    connection.  Too bad: we're in trouble anyway. */
428 static noinline int early_drop(struct net *net, unsigned int hash)
429 {
430 	/* Use oldest entry, which is roughly LRU */
431 	struct nf_conntrack_tuple_hash *h;
432 	struct nf_conn *ct = NULL, *tmp;
433 	struct hlist_node *n;
434 	unsigned int i, cnt = 0;
435 	int dropped = 0;
436 
437 	rcu_read_lock();
438 	for (i = 0; i < nf_conntrack_htable_size; i++) {
439 		hlist_for_each_entry_rcu(h, n, &net->ct.hash[hash],
440 					 hnode) {
441 			tmp = nf_ct_tuplehash_to_ctrack(h);
442 			if (!test_bit(IPS_ASSURED_BIT, &tmp->status))
443 				ct = tmp;
444 			cnt++;
445 		}
446 
447 		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
448 			ct = NULL;
449 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
450 			break;
451 		hash = (hash + 1) % nf_conntrack_htable_size;
452 	}
453 	rcu_read_unlock();
454 
455 	if (!ct)
456 		return dropped;
457 
458 	if (del_timer(&ct->timeout)) {
459 		death_by_timeout((unsigned long)ct);
460 		dropped = 1;
461 		NF_CT_STAT_INC_ATOMIC(net, early_drop);
462 	}
463 	nf_ct_put(ct);
464 	return dropped;
465 }
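/* early_drop() is the "table full" policy: starting at the bucket the new
 * connection would hash into, it scans at most NF_CT_EVICTION_RANGE entries
 * across consecutive buckets and evicts the last non-ASSURED conntrack it saw
 * by firing its timeout early.  Entries marked ASSURED (typically fully
 * established connections) are never evicted this way. */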
466 
467 struct nf_conn *nf_conntrack_alloc(struct net *net,
468 				   const struct nf_conntrack_tuple *orig,
469 				   const struct nf_conntrack_tuple *repl,
470 				   gfp_t gfp)
471 {
472 	struct nf_conn *ct;
473 
474 	if (unlikely(!nf_conntrack_hash_rnd_initted)) {
475 		get_random_bytes(&nf_conntrack_hash_rnd, 4);
476 		nf_conntrack_hash_rnd_initted = 1;
477 	}
478 
479 	/* We don't want any race condition at early drop stage */
480 	atomic_inc(&net->ct.count);
481 
482 	if (nf_conntrack_max &&
483 	    unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
484 		unsigned int hash = hash_conntrack(orig);
485 		if (!early_drop(net, hash)) {
486 			atomic_dec(&net->ct.count);
487 			if (net_ratelimit())
488 				printk(KERN_WARNING
489 				       "nf_conntrack: table full, dropping"
490 				       " packet.\n");
491 			return ERR_PTR(-ENOMEM);
492 		}
493 	}
494 
495 	ct = kmem_cache_zalloc(nf_conntrack_cachep, gfp);
496 	if (ct == NULL) {
497 		pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
498 		atomic_dec(&net->ct.count);
499 		return ERR_PTR(-ENOMEM);
500 	}
501 
502 	atomic_set(&ct->ct_general.use, 1);
503 	ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
504 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
505 	/* Don't set timer yet: wait for confirmation */
506 	setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
507 #ifdef CONFIG_NET_NS
508 	ct->ct_net = net;
509 #endif
510 	INIT_RCU_HEAD(&ct->rcu);
511 
512 	return ct;
513 }
514 EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
515 
516 static void nf_conntrack_free_rcu(struct rcu_head *head)
517 {
518 	struct nf_conn *ct = container_of(head, struct nf_conn, rcu);
519 	struct net *net = nf_ct_net(ct);
520 
521 	nf_ct_ext_free(ct);
522 	kmem_cache_free(nf_conntrack_cachep, ct);
523 	atomic_dec(&net->ct.count);
524 }
525 
526 void nf_conntrack_free(struct nf_conn *ct)
527 {
528 	nf_ct_ext_destroy(ct);
529 	call_rcu(&ct->rcu, nf_conntrack_free_rcu);
530 }
531 EXPORT_SYMBOL_GPL(nf_conntrack_free);
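/* Freeing is split in two stages: nf_conntrack_free() destroys the extensions
 * synchronously, but the struct nf_conn itself (and net->ct.count) is only
 * released in nf_conntrack_free_rcu() after an RCU grace period, so lockless
 * readers that still hold a pointer from a hash walk cannot see the memory
 * being reused under them. */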
532 
533 /* Allocate a new conntrack: we return -ENOMEM if classification
534    failed due to stress.  Otherwise it really is unclassifiable. */
535 static struct nf_conntrack_tuple_hash *
536 init_conntrack(struct net *net,
537 	       const struct nf_conntrack_tuple *tuple,
538 	       struct nf_conntrack_l3proto *l3proto,
539 	       struct nf_conntrack_l4proto *l4proto,
540 	       struct sk_buff *skb,
541 	       unsigned int dataoff)
542 {
543 	struct nf_conn *ct;
544 	struct nf_conn_help *help;
545 	struct nf_conntrack_tuple repl_tuple;
546 	struct nf_conntrack_expect *exp;
547 
548 	if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
549 		pr_debug("Can't invert tuple.\n");
550 		return NULL;
551 	}
552 
553 	ct = nf_conntrack_alloc(net, tuple, &repl_tuple, GFP_ATOMIC);
554 	if (IS_ERR(ct)) {
555 		pr_debug("Can't allocate conntrack.\n");
556 		return (struct nf_conntrack_tuple_hash *)ct;
557 	}
558 
559 	if (!l4proto->new(ct, skb, dataoff)) {
560 		nf_conntrack_free(ct);
561 		pr_debug("init conntrack: can't track with proto module\n");
562 		return NULL;
563 	}
564 
565 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
566 
567 	spin_lock_bh(&nf_conntrack_lock);
568 	exp = nf_ct_find_expectation(net, tuple);
569 	if (exp) {
570 		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
571 			 ct, exp);
572 		/* Welcome, Mr. Bond.  We've been expecting you... */
573 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
574 		ct->master = exp->master;
575 		if (exp->helper) {
576 			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
577 			if (help)
578 				rcu_assign_pointer(help->helper, exp->helper);
579 		}
580 
581 #ifdef CONFIG_NF_CONNTRACK_MARK
582 		ct->mark = exp->master->mark;
583 #endif
584 #ifdef CONFIG_NF_CONNTRACK_SECMARK
585 		ct->secmark = exp->master->secmark;
586 #endif
587 		nf_conntrack_get(&ct->master->ct_general);
588 		NF_CT_STAT_INC(net, expect_new);
589 	} else {
590 		__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
591 		NF_CT_STAT_INC(net, new);
592 	}
593 
594 	/* Overload tuple linked list to put us in unconfirmed list. */
595 	hlist_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnode,
596 		       &net->ct.unconfirmed);
597 
598 	spin_unlock_bh(&nf_conntrack_lock);
599 
600 	if (exp) {
601 		if (exp->expectfn)
602 			exp->expectfn(ct, exp);
603 		nf_ct_expect_put(exp);
604 	}
605 
606 	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
607 }
608 
609 /* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
610 static inline struct nf_conn *
611 resolve_normal_ct(struct net *net,
612 		  struct sk_buff *skb,
613 		  unsigned int dataoff,
614 		  u_int16_t l3num,
615 		  u_int8_t protonum,
616 		  struct nf_conntrack_l3proto *l3proto,
617 		  struct nf_conntrack_l4proto *l4proto,
618 		  int *set_reply,
619 		  enum ip_conntrack_info *ctinfo)
620 {
621 	struct nf_conntrack_tuple tuple;
622 	struct nf_conntrack_tuple_hash *h;
623 	struct nf_conn *ct;
624 
625 	if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
626 			     dataoff, l3num, protonum, &tuple, l3proto,
627 			     l4proto)) {
628 		pr_debug("resolve_normal_ct: Can't get tuple\n");
629 		return NULL;
630 	}
631 
632 	/* look for tuple match */
633 	h = nf_conntrack_find_get(net, &tuple);
634 	if (!h) {
635 		h = init_conntrack(net, &tuple, l3proto, l4proto, skb, dataoff);
636 		if (!h)
637 			return NULL;
638 		if (IS_ERR(h))
639 			return (void *)h;
640 	}
641 	ct = nf_ct_tuplehash_to_ctrack(h);
642 
643 	/* It exists; we have (non-exclusive) reference. */
644 	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
645 		*ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
646 		/* Please set reply bit if this packet is OK */
647 		*set_reply = 1;
648 	} else {
649 		/* Once we've had two way comms, always ESTABLISHED. */
650 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
651 			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
652 			*ctinfo = IP_CT_ESTABLISHED;
653 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
654 			pr_debug("nf_conntrack_in: related packet for %p\n",
655 				 ct);
656 			*ctinfo = IP_CT_RELATED;
657 		} else {
658 			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
659 			*ctinfo = IP_CT_NEW;
660 		}
661 		*set_reply = 0;
662 	}
663 	skb->nfct = &ct->ct_general;
664 	skb->nfctinfo = *ctinfo;
665 	return ct;
666 }
667 
668 unsigned int
669 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
670 		struct sk_buff *skb)
671 {
672 	struct nf_conn *ct;
673 	enum ip_conntrack_info ctinfo;
674 	struct nf_conntrack_l3proto *l3proto;
675 	struct nf_conntrack_l4proto *l4proto;
676 	unsigned int dataoff;
677 	u_int8_t protonum;
678 	int set_reply = 0;
679 	int ret;
680 
681 	/* Previously seen (loopback or untracked)?  Ignore. */
682 	if (skb->nfct) {
683 		NF_CT_STAT_INC_ATOMIC(net, ignore);
684 		return NF_ACCEPT;
685 	}
686 
687 	/* rcu_read_lock()ed by nf_hook_slow */
688 	l3proto = __nf_ct_l3proto_find(pf);
689 	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
690 				   &dataoff, &protonum);
691 	if (ret <= 0) {
692 		pr_debug("not prepared to track yet or error occurred\n");
693 		NF_CT_STAT_INC_ATOMIC(net, error);
694 		NF_CT_STAT_INC_ATOMIC(net, invalid);
695 		return -ret;
696 	}
697 
698 	l4proto = __nf_ct_l4proto_find(pf, protonum);
699 
700 	/* It may be a special packet, error, unclean...
701 	 * the inverse of the return code tells the netfilter
702 	 * core what to do with the packet. */
703 	if (l4proto->error != NULL) {
704 		ret = l4proto->error(net, skb, dataoff, &ctinfo, pf, hooknum);
705 		if (ret <= 0) {
706 			NF_CT_STAT_INC_ATOMIC(net, error);
707 			NF_CT_STAT_INC_ATOMIC(net, invalid);
708 			return -ret;
709 		}
710 	}
711 
712 	ct = resolve_normal_ct(net, skb, dataoff, pf, protonum,
713 			       l3proto, l4proto, &set_reply, &ctinfo);
714 	if (!ct) {
715 		/* Not valid part of a connection */
716 		NF_CT_STAT_INC_ATOMIC(net, invalid);
717 		return NF_ACCEPT;
718 	}
719 
720 	if (IS_ERR(ct)) {
721 		/* Too stressed to deal. */
722 		NF_CT_STAT_INC_ATOMIC(net, drop);
723 		return NF_DROP;
724 	}
725 
726 	NF_CT_ASSERT(skb->nfct);
727 
728 	ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
729 	if (ret <= 0) {
730 		/* Invalid: inverse of the return code tells
731 		 * the netfilter core what to do */
732 		pr_debug("nf_conntrack_in: Can't track with proto module\n");
733 		nf_conntrack_put(skb->nfct);
734 		skb->nfct = NULL;
735 		NF_CT_STAT_INC_ATOMIC(net, invalid);
736 		return -ret;
737 	}
738 
739 	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
740 		nf_conntrack_event_cache(IPCT_STATUS, ct);
741 
742 	return ret;
743 }
744 EXPORT_SYMBOL_GPL(nf_conntrack_in);
745 
746 bool nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
747 			  const struct nf_conntrack_tuple *orig)
748 {
749 	bool ret;
750 
751 	rcu_read_lock();
752 	ret = nf_ct_invert_tuple(inverse, orig,
753 				 __nf_ct_l3proto_find(orig->src.l3num),
754 				 __nf_ct_l4proto_find(orig->src.l3num,
755 						      orig->dst.protonum));
756 	rcu_read_unlock();
757 	return ret;
758 }
759 EXPORT_SYMBOL_GPL(nf_ct_invert_tuplepr);
760 
761 /* Alter reply tuple (maybe alter helper).  This is for NAT, and is
762    implicitly racy: see __nf_conntrack_confirm */
763 void nf_conntrack_alter_reply(struct nf_conn *ct,
764 			      const struct nf_conntrack_tuple *newreply)
765 {
766 	struct nf_conn_help *help = nfct_help(ct);
767 
768 	/* Should be unconfirmed, so not in hash table yet */
769 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
770 
771 	pr_debug("Altering reply tuple of %p to ", ct);
772 	nf_ct_dump_tuple(newreply);
773 
774 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
775 	if (ct->master || (help && !hlist_empty(&help->expectations)))
776 		return;
777 
778 	rcu_read_lock();
779 	__nf_ct_try_assign_helper(ct, GFP_ATOMIC);
780 	rcu_read_unlock();
781 }
782 EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
783 
784 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
785 void __nf_ct_refresh_acct(struct nf_conn *ct,
786 			  enum ip_conntrack_info ctinfo,
787 			  const struct sk_buff *skb,
788 			  unsigned long extra_jiffies,
789 			  int do_acct)
790 {
791 	int event = 0;
792 
793 	NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
794 	NF_CT_ASSERT(skb);
795 
796 	spin_lock_bh(&nf_conntrack_lock);
797 
798 	/* Only update if this is not a fixed timeout */
799 	if (test_bit(IPS_FIXED_TIMEOUT_BIT, &ct->status))
800 		goto acct;
801 
802 	/* If not in hash table, timer will not be active yet */
803 	if (!nf_ct_is_confirmed(ct)) {
804 		ct->timeout.expires = extra_jiffies;
805 		event = IPCT_REFRESH;
806 	} else {
807 		unsigned long newtime = jiffies + extra_jiffies;
808 
809 		/* Only update the timeout if the new timeout is at least
810 		   HZ jiffies from the old timeout. Need del_timer for race
811 		   avoidance (may already be dying). */
812 		if (newtime - ct->timeout.expires >= HZ
813 		    && del_timer(&ct->timeout)) {
814 			ct->timeout.expires = newtime;
815 			add_timer(&ct->timeout);
816 			event = IPCT_REFRESH;
817 		}
818 	}
819 
820 acct:
821 	if (do_acct) {
822 		struct nf_conn_counter *acct;
823 
824 		acct = nf_conn_acct_find(ct);
825 		if (acct) {
826 			acct[CTINFO2DIR(ctinfo)].packets++;
827 			acct[CTINFO2DIR(ctinfo)].bytes +=
828 				skb->len - skb_network_offset(skb);
829 		}
830 	}
831 
832 	spin_unlock_bh(&nf_conntrack_lock);
833 
834 	/* must be unlocked when calling event cache */
835 	if (event)
836 		nf_conntrack_event_cache(event, ct);
837 }
838 EXPORT_SYMBOL_GPL(__nf_ct_refresh_acct);
839 
840 bool __nf_ct_kill_acct(struct nf_conn *ct,
841 		       enum ip_conntrack_info ctinfo,
842 		       const struct sk_buff *skb,
843 		       int do_acct)
844 {
845 	if (do_acct) {
846 		struct nf_conn_counter *acct;
847 
848 		spin_lock_bh(&nf_conntrack_lock);
849 		acct = nf_conn_acct_find(ct);
850 		if (acct) {
851 			acct[CTINFO2DIR(ctinfo)].packets++;
852 			acct[CTINFO2DIR(ctinfo)].bytes +=
853 				skb->len - skb_network_offset(skb);
854 		}
855 		spin_unlock_bh(&nf_conntrack_lock);
856 	}
857 
858 	if (del_timer(&ct->timeout)) {
859 		ct->timeout.function((unsigned long)ct);
860 		return true;
861 	}
862 	return false;
863 }
864 EXPORT_SYMBOL_GPL(__nf_ct_kill_acct);
865 
866 #if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
867 
868 #include <linux/netfilter/nfnetlink.h>
869 #include <linux/netfilter/nfnetlink_conntrack.h>
870 #include <linux/mutex.h>
871 
872 /* Generic function for tcp/udp/sctp/dccp and the like. This needs to be
873  * in ip_conntrack_core, since we don't want the protocols to autoload
874  * or depend on ctnetlink */
875 int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
876 			       const struct nf_conntrack_tuple *tuple)
877 {
878 	NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port);
879 	NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port);
880 	return 0;
881 
882 nla_put_failure:
883 	return -1;
884 }
885 EXPORT_SYMBOL_GPL(nf_ct_port_tuple_to_nlattr);
886 
887 const struct nla_policy nf_ct_port_nla_policy[CTA_PROTO_MAX+1] = {
888 	[CTA_PROTO_SRC_PORT]  = { .type = NLA_U16 },
889 	[CTA_PROTO_DST_PORT]  = { .type = NLA_U16 },
890 };
891 EXPORT_SYMBOL_GPL(nf_ct_port_nla_policy);
892 
893 int nf_ct_port_nlattr_to_tuple(struct nlattr *tb[],
894 			       struct nf_conntrack_tuple *t)
895 {
896 	if (!tb[CTA_PROTO_SRC_PORT] || !tb[CTA_PROTO_DST_PORT])
897 		return -EINVAL;
898 
899 	t->src.u.tcp.port = nla_get_be16(tb[CTA_PROTO_SRC_PORT]);
900 	t->dst.u.tcp.port = nla_get_be16(tb[CTA_PROTO_DST_PORT]);
901 
902 	return 0;
903 }
904 EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_to_tuple);
905 #endif
906 
907 /* Used by ipt_REJECT and ip6t_REJECT. */
908 static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
909 {
910 	struct nf_conn *ct;
911 	enum ip_conntrack_info ctinfo;
912 
913 	/* This ICMP is in reverse direction to the packet which caused it */
914 	ct = nf_ct_get(skb, &ctinfo);
915 	if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
916 		ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
917 	else
918 		ctinfo = IP_CT_RELATED;
919 
920 	/* Attach to new skbuff, and increment count */
921 	nskb->nfct = &ct->ct_general;
922 	nskb->nfctinfo = ctinfo;
923 	nf_conntrack_get(nskb->nfct);
924 }
925 
926 /* Bring out ya dead! */
927 static struct nf_conn *
928 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
929 		void *data, unsigned int *bucket)
930 {
931 	struct nf_conntrack_tuple_hash *h;
932 	struct nf_conn *ct;
933 	struct hlist_node *n;
934 
935 	spin_lock_bh(&nf_conntrack_lock);
936 	for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
937 		hlist_for_each_entry(h, n, &net->ct.hash[*bucket], hnode) {
938 			ct = nf_ct_tuplehash_to_ctrack(h);
939 			if (iter(ct, data))
940 				goto found;
941 		}
942 	}
943 	hlist_for_each_entry(h, n, &net->ct.unconfirmed, hnode) {
944 		ct = nf_ct_tuplehash_to_ctrack(h);
945 		if (iter(ct, data))
946 			set_bit(IPS_DYING_BIT, &ct->status);
947 	}
948 	spin_unlock_bh(&nf_conntrack_lock);
949 	return NULL;
950 found:
951 	atomic_inc(&ct->ct_general.use);
952 	spin_unlock_bh(&nf_conntrack_lock);
953 	return ct;
954 }
955 
956 void nf_ct_iterate_cleanup(struct net *net,
957 			   int (*iter)(struct nf_conn *i, void *data),
958 			   void *data)
959 {
960 	struct nf_conn *ct;
961 	unsigned int bucket = 0;
962 
963 	while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
964 		/* Time to push up daisies... */
965 		if (del_timer(&ct->timeout))
966 			death_by_timeout((unsigned long)ct);
967 		/* ... else the timer will get him soon. */
968 
969 		nf_ct_put(ct);
970 	}
971 }
972 EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
973 
974 struct __nf_ct_flush_report {
975 	u32 pid;
976 	int report;
977 };
978 
979 static int kill_all(struct nf_conn *i, void *data)
980 {
981 	struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
982 
983 	/* get_next_corpse sets the dying bit for us */
984 	nf_conntrack_event_report(IPCT_DESTROY,
985 				  i,
986 				  fr->pid,
987 				  fr->report);
988 	return 1;
989 }
990 
991 void nf_ct_free_hashtable(struct hlist_head *hash, int vmalloced, unsigned int size)
992 {
993 	if (vmalloced)
994 		vfree(hash);
995 	else
996 		free_pages((unsigned long)hash,
997 			   get_order(sizeof(struct hlist_head) * size));
998 }
999 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
1000 
1001 void nf_conntrack_flush(struct net *net, u32 pid, int report)
1002 {
1003 	struct __nf_ct_flush_report fr = {
1004 		.pid 	= pid,
1005 		.report = report,
1006 	};
1007 	nf_ct_iterate_cleanup(net, kill_all, &fr);
1008 }
1009 EXPORT_SYMBOL_GPL(nf_conntrack_flush);
1010 
1011 static void nf_conntrack_cleanup_init_net(void)
1012 {
1013 	nf_conntrack_helper_fini();
1014 	nf_conntrack_proto_fini();
1015 	kmem_cache_destroy(nf_conntrack_cachep);
1016 }
1017 
1018 static void nf_conntrack_cleanup_net(struct net *net)
1019 {
1020 	nf_ct_event_cache_flush(net);
1021 	nf_conntrack_ecache_fini(net);
1022  i_see_dead_people:
1023 	nf_conntrack_flush(net, 0, 0);
1024 	if (atomic_read(&net->ct.count) != 0) {
1025 		schedule();
1026 		goto i_see_dead_people;
1027 	}
1028 	/* wait until all references to nf_conntrack_untracked are dropped */
1029 	while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1030 		schedule();
1031 
1032 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1033 			     nf_conntrack_htable_size);
1034 	nf_conntrack_acct_fini(net);
1035 	nf_conntrack_expect_fini(net);
1036 	free_percpu(net->ct.stat);
1037 }
1038 
1039 /* Mishearing the voices in his head, our hero wonders how he's
1040    supposed to kill the mall. */
1041 void nf_conntrack_cleanup(struct net *net)
1042 {
1043 	if (net_eq(net, &init_net))
1044 		rcu_assign_pointer(ip_ct_attach, NULL);
1045 
1046 	/* This makes sure all current packets have passed through
1047 	   netfilter framework.  Roll on, two-stage module
1048 	   delete... */
1049 	synchronize_net();
1050 
1051 	nf_conntrack_cleanup_net(net);
1052 
1053 	if (net_eq(net, &init_net)) {
1054 		rcu_assign_pointer(nf_ct_destroy, NULL);
1055 		nf_conntrack_cleanup_init_net();
1056 	}
1057 }
1058 
1059 struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced)
1060 {
1061 	struct hlist_head *hash;
1062 	unsigned int size, i;
1063 
1064 	*vmalloced = 0;
1065 
1066 	size = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_head));
1067 	hash = (void*)__get_free_pages(GFP_KERNEL|__GFP_NOWARN,
1068 				       get_order(sizeof(struct hlist_head)
1069 						 * size));
1070 	if (!hash) {
1071 		*vmalloced = 1;
1072 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1073 		hash = vmalloc(sizeof(struct hlist_head) * size);
1074 	}
1075 
1076 	if (hash)
1077 		for (i = 0; i < size; i++)
1078 			INIT_HLIST_HEAD(&hash[i]);
1079 
1080 	return hash;
1081 }
1082 EXPORT_SYMBOL_GPL(nf_ct_alloc_hashtable);
1083 
1084 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
1085 {
1086 	int i, bucket, vmalloced, old_vmalloced;
1087 	unsigned int hashsize, old_size;
1088 	int rnd;
1089 	struct hlist_head *hash, *old_hash;
1090 	struct nf_conntrack_tuple_hash *h;
1091 
1092 	/* On boot, we can set this without any fancy locking. */
1093 	if (!nf_conntrack_htable_size)
1094 		return param_set_uint(val, kp);
1095 
1096 	hashsize = simple_strtoul(val, NULL, 0);
1097 	if (!hashsize)
1098 		return -EINVAL;
1099 
1100 	hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced);
1101 	if (!hash)
1102 		return -ENOMEM;
1103 
1104 	/* We have to rehash for the new table anyway, so we can also
1105 	 * use a new random seed */
1106 	get_random_bytes(&rnd, 4);
1107 
1108 	/* Lookups in the old hash might happen in parallel, which means we
1109 	 * might get false negatives during connection lookup. New connections
1110 	 * created because of a false negative won't make it into the hash
1111 	 * though since that required taking the lock.
1112 	 */
1113 	spin_lock_bh(&nf_conntrack_lock);
1114 	for (i = 0; i < nf_conntrack_htable_size; i++) {
1115 		while (!hlist_empty(&init_net.ct.hash[i])) {
1116 			h = hlist_entry(init_net.ct.hash[i].first,
1117 					struct nf_conntrack_tuple_hash, hnode);
1118 			hlist_del_rcu(&h->hnode);
1119 			bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1120 			hlist_add_head(&h->hnode, &hash[bucket]);
1121 		}
1122 	}
1123 	old_size = nf_conntrack_htable_size;
1124 	old_vmalloced = init_net.ct.hash_vmalloc;
1125 	old_hash = init_net.ct.hash;
1126 
1127 	nf_conntrack_htable_size = hashsize;
1128 	init_net.ct.hash_vmalloc = vmalloced;
1129 	init_net.ct.hash = hash;
1130 	nf_conntrack_hash_rnd = rnd;
1131 	spin_unlock_bh(&nf_conntrack_lock);
1132 
1133 	nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
1134 	return 0;
1135 }
1136 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1137 
1138 module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1139 		  &nf_conntrack_htable_size, 0600);
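/* The "hashsize" parameter registered above can also be changed at runtime;
 * with this module name it is normally exposed as
 * /sys/module/nf_conntrack/parameters/hashsize (mode 0600).  Writing a new
 * value there ends up in nf_conntrack_set_hashsize(), which allocates a new
 * table, rehashes every entry under nf_conntrack_lock and frees the old one. */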
1140 
1141 static int nf_conntrack_init_init_net(void)
1142 {
1143 	int max_factor = 8;
1144 	int ret;
1145 
1146 	/* Idea from tcp.c: use 1/16384 of memory.  On i386: 32MB
1147 	 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
1148 	if (!nf_conntrack_htable_size) {
1149 		nf_conntrack_htable_size
1150 			= (((num_physpages << PAGE_SHIFT) / 16384)
1151 			   / sizeof(struct hlist_head));
1152 		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
1153 			nf_conntrack_htable_size = 16384;
1154 		if (nf_conntrack_htable_size < 32)
1155 			nf_conntrack_htable_size = 32;
1156 
1157 		/* Use a max. factor of four by default to get the same max as
1158 		 * with the old struct list_heads. When a table size is given
1159 		 * we use the old value of 8 to avoid reducing the max.
1160 		 * entries. */
1161 		max_factor = 4;
1162 	}
1163 	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
1164 
1165 	printk("nf_conntrack version %s (%u buckets, %d max)\n",
1166 	       NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
1167 	       nf_conntrack_max);
1168 
1169 	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
1170 						sizeof(struct nf_conn),
1171 						0, 0, NULL);
1172 	if (!nf_conntrack_cachep) {
1173 		printk(KERN_ERR "Unable to create nf_conn slab cache\n");
1174 		ret = -ENOMEM;
1175 		goto err_cache;
1176 	}
1177 
1178 	ret = nf_conntrack_proto_init();
1179 	if (ret < 0)
1180 		goto err_proto;
1181 
1182 	ret = nf_conntrack_helper_init();
1183 	if (ret < 0)
1184 		goto err_helper;
1185 
1186 	return 0;
1187 
1188 err_helper:
1189 	nf_conntrack_proto_fini();
1190 err_proto:
1191 	kmem_cache_destroy(nf_conntrack_cachep);
1192 err_cache:
1193 	return ret;
1194 }
1195 
1196 static int nf_conntrack_init_net(struct net *net)
1197 {
1198 	int ret;
1199 
1200 	atomic_set(&net->ct.count, 0);
1201 	INIT_HLIST_HEAD(&net->ct.unconfirmed);
1202 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
1203 	if (!net->ct.stat) {
1204 		ret = -ENOMEM;
1205 		goto err_stat;
1206 	}
1207 	ret = nf_conntrack_ecache_init(net);
1208 	if (ret < 0)
1209 		goto err_ecache;
1210 	net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
1211 						  &net->ct.hash_vmalloc);
1212 	if (!net->ct.hash) {
1213 		ret = -ENOMEM;
1214 		printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
1215 		goto err_hash;
1216 	}
1217 	ret = nf_conntrack_expect_init(net);
1218 	if (ret < 0)
1219 		goto err_expect;
1220 	ret = nf_conntrack_acct_init(net);
1221 	if (ret < 0)
1222 		goto err_acct;
1223 
1224 	/* Set up fake conntrack:
1225 	    - to never be deleted, not in any hashes */
1226 #ifdef CONFIG_NET_NS
1227 	nf_conntrack_untracked.ct_net = &init_net;
1228 #endif
1229 	atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
1230 	/*  - and make it look like a confirmed connection */
1231 	set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
1232 
1233 	return 0;
1234 
1235 err_acct:
1236 	nf_conntrack_expect_fini(net);
1237 err_expect:
1238 	nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
1239 			     nf_conntrack_htable_size);
1240 err_hash:
1241 	nf_conntrack_ecache_fini(net);
1242 err_ecache:
1243 	free_percpu(net->ct.stat);
1244 err_stat:
1245 	return ret;
1246 }
1247 
1248 int nf_conntrack_init(struct net *net)
1249 {
1250 	int ret;
1251 
1252 	if (net_eq(net, &init_net)) {
1253 		ret = nf_conntrack_init_init_net();
1254 		if (ret < 0)
1255 			goto out_init_net;
1256 	}
1257 	ret = nf_conntrack_init_net(net);
1258 	if (ret < 0)
1259 		goto out_net;
1260 
1261 	if (net_eq(net, &init_net)) {
1262 		/* For use by REJECT target */
1263 		rcu_assign_pointer(ip_ct_attach, nf_conntrack_attach);
1264 		rcu_assign_pointer(nf_ct_destroy, destroy_conntrack);
1265 	}
1266 	return 0;
1267 
1268 out_net:
1269 	if (net_eq(net, &init_net))
1270 		nf_conntrack_cleanup_init_net();
1271 out_init_net:
1272 	return ret;
1273 }
1274