/*
 *	xt_hashlimit - Netfilter module to limit the number of packets per time
 *	separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 *	(C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 *	Copyright © CC Computer Consultants GmbH, 2007 - 2008
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif

#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");

/* need to declare this at the top */
static struct proc_dir_entry *hashlimit_procdir4;
static struct proc_dir_entry *hashlimit_procdir6;
static const struct file_operations dl_file_ops;

/* hash table crap */
struct dsthash_dst {
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ip;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
		struct {
			__be32 src[4];
			__be32 dst[4];
		} ip6;
#endif
	};
	__be16 src_port;
	__be16 dst_port;
};

struct dsthash_ent {
	/* static / read-only parts in the beginning */
	struct hlist_node node;
	struct dsthash_dst dst;

	/* modified structure members in the end */
	unsigned long expires;		/* precalculated expiry time */
	struct {
		unsigned long prev;	/* last modification */
		u_int32_t credit;
		u_int32_t credit_cap, cost;
	} rateinfo;
};

struct xt_hashlimit_htable {
	struct hlist_node node;		/* global list of all htables */
	atomic_t use;
	u_int8_t family;

	struct hashlimit_cfg1 cfg;	/* config */

	/* used internally */
	spinlock_t lock;		/* lock for list_head */
	u_int32_t rnd;			/* random seed for hash */
	int rnd_initialized;
	unsigned int count;		/* number entries in table */
	struct timer_list timer;	/* timer for gc */

	/* seq_file stuff */
	struct proc_dir_entry *pde;

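	/*
	 * Note: zero-length array; htable_create()/htable_create_v0()
	 * allocate the cfg.size buckets together with this structure in a
	 * single vmalloc(), so the table follows immediately after these
	 * fields.
	 */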
	struct hlist_head hash[0];	/* hashtable itself */
};

static DEFINE_SPINLOCK(hashlimit_lock);	/* protects htables list */
static DEFINE_MUTEX(hlimit_mutex);	/* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables);
static struct kmem_cache *hashlimit_cachep __read_mostly;

static inline bool dst_cmp(const struct dsthash_ent *ent,
			   const struct dsthash_dst *b)
{
	return !memcmp(&ent->dst, b, sizeof(ent->dst));
}

static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
	u_int32_t hash = jhash2((const u32 *)dst,
				sizeof(*dst)/sizeof(u32),
				ht->rnd);
	/*
	 * Instead of returning hash % ht->cfg.size (implying a divide)
	 * we return the high 32 bits of the (hash * ht->cfg.size) that will
	 * give results between [0 and cfg.size-1] and same hash distribution,
	 * but using a multiply, less expensive than a divide
	 */
	return ((u64)hash * ht->cfg.size) >> 32;
}
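/*
 * Illustrative example (values chosen for exposition, not from the original
 * code): with cfg.size == 1024 and jhash2() returning 0x80000000,
 * ((u64)0x80000000 * 1024) >> 32 == 512, so the 32-bit hash is scaled into
 * [0, 1023] with a multiply and a shift instead of a modulo.
 */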

static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
	     const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;
	struct hlist_node *pos;
	u_int32_t hash = hash_dst(ht, dst);

	if (!hlist_empty(&ht->hash[hash])) {
		hlist_for_each_entry(ent, pos, &ht->hash[hash], node)
			if (dst_cmp(ent, dst))
				return ent;
	}
	return NULL;
}

/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
		   const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;

	/* initialize hash with random val at the time we allocate
	 * the first hashtable entry */
	if (!ht->rnd_initialized) {
		get_random_bytes(&ht->rnd, 4);
		ht->rnd_initialized = 1;
	}

	if (ht->cfg.max && ht->count >= ht->cfg.max) {
		/* FIXME: do something. question is what.. */
		if (net_ratelimit())
			printk(KERN_WARNING
				"xt_hashlimit: max count of %u reached\n",
				ht->cfg.max);
		return NULL;
	}

	ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
	if (!ent) {
		if (net_ratelimit())
			printk(KERN_ERR
				"xt_hashlimit: can't allocate dsthash_ent\n");
		return NULL;
	}
	memcpy(&ent->dst, dst, sizeof(ent->dst));

	hlist_add_head(&ent->node, &ht->hash[hash_dst(ht, dst)]);
	ht->count++;
	return ent;
}

static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
	hlist_del(&ent->node);
	kmem_cache_free(hashlimit_cachep, ent);
	ht->count--;
}
static void htable_gc(unsigned long htlong);

static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
{
	struct xt_hashlimit_htable *hinfo;
	unsigned int size;
	unsigned int i;

	if (minfo->cfg.size)
		size = minfo->cfg.size;
	else {
		size = ((num_physpages << PAGE_SHIFT) / 16384) /
		       sizeof(struct list_head);
		if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
			size = 8192;
		if (size < 16)
			size = 16;
	}
	/* FIXME: don't use vmalloc() here or anywhere else -HW */
	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
			sizeof(struct list_head) * size);
	if (!hinfo) {
		printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
		return -1;
	}
	minfo->hinfo = hinfo;

	/* copy match config into hashtable config */
	hinfo->cfg.mode        = minfo->cfg.mode;
	hinfo->cfg.avg         = minfo->cfg.avg;
	hinfo->cfg.burst       = minfo->cfg.burst;
	hinfo->cfg.max         = minfo->cfg.max;
	hinfo->cfg.gc_interval = minfo->cfg.gc_interval;
	hinfo->cfg.expire      = minfo->cfg.expire;

	if (family == NFPROTO_IPV4)
		hinfo->cfg.srcmask = hinfo->cfg.dstmask = 32;
	else
		hinfo->cfg.srcmask = hinfo->cfg.dstmask = 128;

	hinfo->cfg.size = size;
	if (!hinfo->cfg.max)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	atomic_set(&hinfo->use, 1);
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = 0;
	spin_lock_init(&hinfo->lock);
	hinfo->pde = proc_create_data(minfo->name, 0,
		(family == NFPROTO_IPV4) ?
		hashlimit_procdir4 : hashlimit_procdir6,
		&dl_file_ops, hinfo);
	if (!hinfo->pde) {
		vfree(hinfo);
		return -1;
	}

	setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
	add_timer(&hinfo->timer);

	spin_lock_bh(&hashlimit_lock);
	hlist_add_head(&hinfo->node, &hashlimit_htables);
	spin_unlock_bh(&hashlimit_lock);

	return 0;
}

static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
{
	struct xt_hashlimit_htable *hinfo;
	unsigned int size;
	unsigned int i;

	if (minfo->cfg.size) {
		size = minfo->cfg.size;
	} else {
		size = (num_physpages << PAGE_SHIFT) / 16384 /
		       sizeof(struct list_head);
		if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
			size = 8192;
		if (size < 16)
			size = 16;
	}
	/* FIXME: don't use vmalloc() here or anywhere else -HW */
	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
	                sizeof(struct list_head) * size);
	if (hinfo == NULL) {
		printk(KERN_ERR "xt_hashlimit: unable to create hashtable\n");
		return -1;
	}
	minfo->hinfo = hinfo;

	/* copy match config into hashtable config */
	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
	hinfo->cfg.size = size;
	if (hinfo->cfg.max == 0)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	atomic_set(&hinfo->use, 1);
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = 0;
	spin_lock_init(&hinfo->lock);

	hinfo->pde = proc_create_data(minfo->name, 0,
		(family == NFPROTO_IPV4) ?
		hashlimit_procdir4 : hashlimit_procdir6,
		&dl_file_ops, hinfo);
	if (hinfo->pde == NULL) {
		vfree(hinfo);
		return -1;
	}

	setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
	add_timer(&hinfo->timer);

	spin_lock_bh(&hashlimit_lock);
	hlist_add_head(&hinfo->node, &hashlimit_htables);
	spin_unlock_bh(&hashlimit_lock);

	return 0;
}

static bool select_all(const struct xt_hashlimit_htable *ht,
		       const struct dsthash_ent *he)
{
	return 1;
}

static bool select_gc(const struct xt_hashlimit_htable *ht,
		      const struct dsthash_ent *he)
{
	return time_after_eq(jiffies, he->expires);
}

static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
			bool (*select)(const struct xt_hashlimit_htable *ht,
				      const struct dsthash_ent *he))
{
	unsigned int i;

	/* lock hash table and iterate over it */
	spin_lock_bh(&ht->lock);
	for (i = 0; i < ht->cfg.size; i++) {
		struct dsthash_ent *dh;
		struct hlist_node *pos, *n;
		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
			if ((*select)(ht, dh))
				dsthash_free(ht, dh);
		}
	}
	spin_unlock_bh(&ht->lock);
}

/* hash table garbage collector, run by timer */
static void htable_gc(unsigned long htlong)
{
	struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;

	htable_selective_cleanup(ht, select_gc);

	/* re-add the timer accordingly */
	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
	add_timer(&ht->timer);
}

static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
	del_timer_sync(&hinfo->timer);

	/* remove proc entry */
	remove_proc_entry(hinfo->pde->name,
			  hinfo->family == NFPROTO_IPV4 ? hashlimit_procdir4 :
						     hashlimit_procdir6);
	htable_selective_cleanup(hinfo, select_all);
	vfree(hinfo);
}

static struct xt_hashlimit_htable *htable_find_get(const char *name,
						   u_int8_t family)
{
	struct xt_hashlimit_htable *hinfo;
	struct hlist_node *pos;

	spin_lock_bh(&hashlimit_lock);
	hlist_for_each_entry(hinfo, pos, &hashlimit_htables, node) {
		if (!strcmp(name, hinfo->pde->name) &&
		    hinfo->family == family) {
			atomic_inc(&hinfo->use);
			spin_unlock_bh(&hashlimit_lock);
			return hinfo;
		}
	}
	spin_unlock_bh(&hashlimit_lock);
	return NULL;
}

static void htable_put(struct xt_hashlimit_htable *hinfo)
{
	if (atomic_dec_and_test(&hinfo->use)) {
		spin_lock_bh(&hashlimit_lock);
		hlist_del(&hinfo->node);
		spin_unlock_bh(&hashlimit_lock);
		htable_destroy(hinfo);
	}
}

/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If your credit balance ever exceeds `credit_cap', the extra credit
   is discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
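/*
 * Worked example (assuming HZ == 1000; adjust for other HZ values):
 * MAX_CPJ = 0xFFFFFFFF / (1000*60*60*24) == 49 and POW2_BELOW32(49) == 32,
 * so CREDITS_PER_JIFFY is 32.  One day of credit is then
 * 32 * 1000 * 60*60*24 == 2,764,800,000, which still fits in a u_int32_t.
 */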

/* Precision saver. */
static inline u_int32_t
user2credits(u_int32_t user)
{
	/* If multiplying would overflow... */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		/* Divide first. */
		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
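/*
 * Illustrative conversion (assuming HZ == 1000 and thus CREDITS_PER_JIFFY
 * == 32): a user-space rate of 5/second is typically passed in as
 * avg = XT_HASHLIMIT_SCALE / 5 = 2000, and
 * user2credits(2000) = 2000 * 1000 * 32 / 10000 = 6400 credits.  With the
 * bucket refilling at 32 credits per jiffy (32000/second), one "cost" of
 * 6400 credits becomes available five times per second.
 */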

static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
{
	dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
	if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
		dh->rateinfo.credit = dh->rateinfo.credit_cap;
	dh->rateinfo.prev = now;
}
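/*
 * Continuing the illustrative numbers above: an entry last updated 200
 * jiffies ago regains 200 * 32 == 6400 credits here, but never accumulates
 * more than credit_cap while idle.
 */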

static inline __be32 maskl(__be32 a, unsigned int l)
{
	return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}

#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
	switch (p) {
	case 0 ... 31:
		i[0] = maskl(i[0], p);
		i[1] = i[2] = i[3] = 0;
		break;
	case 32 ... 63:
		i[1] = maskl(i[1], p - 32);
		i[2] = i[3] = 0;
		break;
	case 64 ... 95:
		i[2] = maskl(i[2], p - 64);
		i[3] = 0;
		break;
	case 96 ... 127:
		i[3] = maskl(i[3], p - 96);
		break;
	case 128:
		break;
	}
}
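/*
 * Example with hypothetical prefix lengths: maskl(addr, 24) keeps the top
 * 24 bits of one word, so 192.168.1.1/24 becomes 192.168.1.0.  For an IPv6
 * /72 prefix this function leaves i[0] and i[1] untouched, keeps the top
 * 8 bits of i[2] and zeroes i[3].
 */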
#endif

static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
		   struct dsthash_dst *dst,
		   const struct sk_buff *skb, unsigned int protoff)
{
	__be16 _ports[2], *ports;
	u8 nexthdr;

	memset(dst, 0, sizeof(*dst));

	switch (hinfo->family) {
	case NFPROTO_IPV4:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			dst->ip.dst = maskl(ip_hdr(skb)->daddr,
			              hinfo->cfg.dstmask);
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			dst->ip.src = maskl(ip_hdr(skb)->saddr,
			              hinfo->cfg.srcmask);

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ip_hdr(skb)->protocol;
		break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	case NFPROTO_IPV6:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(dst->ip6.dst));
			hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
		}
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
			memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
			       sizeof(dst->ip6.src));
			hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
		}

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ipv6_hdr(skb)->nexthdr;
		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		if ((int)protoff < 0)
			return -1;
		break;
#endif
	default:
		BUG();
		return 0;
	}

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		ports = skb_header_pointer(skb, protoff, sizeof(_ports),
					   &_ports);
		break;
	default:
		_ports[0] = _ports[1] = 0;
		ports = _ports;
		break;
	}
	if (!ports)
		return -1;
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
		dst->src_port = ports[0];
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
		dst->dst_port = ports[1];
	return 0;
}
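/*
 * For example, with a (hypothetical) rule using --hashlimit-mode
 * srcip,srcport only dst->ip.src (masked by cfg.srcmask) and dst->src_port
 * are filled in; everything else stays zero from the memset() above, so all
 * packets sharing that source address/port pair map to one dsthash_ent.
 */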

static bool
hashlimit_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct xt_hashlimit_info *r =
		((const struct xt_hashlimit_info *)par->matchinfo)->u.master;
	struct xt_hashlimit_htable *hinfo = r->hinfo;
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;

	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
		goto hotdrop;

	spin_lock_bh(&hinfo->lock);
	dh = dsthash_find(hinfo, &dst);
	if (!dh) {
		dh = dsthash_alloc_init(hinfo, &dst);
		if (!dh) {
			spin_unlock_bh(&hinfo->lock);
			goto hotdrop;
		}

		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
		dh->rateinfo.prev = jiffies;
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
						   hinfo->cfg.burst);
		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
						       hinfo->cfg.burst);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now);
	}

	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
		/* We're underlimit. */
		dh->rateinfo.credit -= dh->rateinfo.cost;
		spin_unlock_bh(&hinfo->lock);
		return true;
	}

	spin_unlock_bh(&hinfo->lock);

	/* default case: we're overlimit, thus don't match */
	return false;

hotdrop:
	*par->hotdrop = true;
	return false;
}

static bool
hashlimit_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;

	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
		goto hotdrop;

	spin_lock_bh(&hinfo->lock);
	dh = dsthash_find(hinfo, &dst);
	if (dh == NULL) {
		dh = dsthash_alloc_init(hinfo, &dst);
		if (dh == NULL) {
			spin_unlock_bh(&hinfo->lock);
			goto hotdrop;
		}

		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
		dh->rateinfo.prev = jiffies;
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
		                      hinfo->cfg.burst);
		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
		                          hinfo->cfg.burst);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now);
	}

	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
		/* below the limit */
		dh->rateinfo.credit -= dh->rateinfo.cost;
		spin_unlock_bh(&hinfo->lock);
		return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
	}

	spin_unlock_bh(&hinfo->lock);
	/* default match is underlimit - so over the limit, we need to invert */
	return info->cfg.mode & XT_HASHLIMIT_INVERT;

 hotdrop:
	*par->hotdrop = true;
	return false;
}

static bool hashlimit_mt_check_v0(const struct xt_mtchk_param *par)
{
	struct xt_hashlimit_info *r = par->matchinfo;

	/* Check for overflow. */
	if (r->cfg.burst == 0 ||
	    user2credits(r->cfg.avg * r->cfg.burst) < user2credits(r->cfg.avg)) {
		printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
		       r->cfg.avg, r->cfg.burst);
		return false;
	}
	if (r->cfg.mode == 0 ||
	    r->cfg.mode > (XT_HASHLIMIT_HASH_DPT |
			   XT_HASHLIMIT_HASH_DIP |
			   XT_HASHLIMIT_HASH_SIP |
			   XT_HASHLIMIT_HASH_SPT))
		return false;
	if (!r->cfg.gc_interval)
		return false;
	if (!r->cfg.expire)
		return false;
	if (r->name[sizeof(r->name) - 1] != '\0')
		return false;

	/* This is the best we've got: We cannot release and re-grab lock,
	 * since checkentry() is called before x_tables.c grabs xt_mutex.
	 * We also cannot grab the hashtable spinlock, since htable_create will
	 * call vmalloc, and that can sleep.  And we cannot just re-search
	 * the list of htable's in htable_create(), since then we would
	 * create duplicate proc files. -HW */
	mutex_lock(&hlimit_mutex);
	r->hinfo = htable_find_get(r->name, par->match->family);
	if (!r->hinfo && htable_create_v0(r, par->match->family) != 0) {
		mutex_unlock(&hlimit_mutex);
		return false;
	}
	mutex_unlock(&hlimit_mutex);

	/* Ugly hack: For SMP, we only want to use one set */
	r->u.master = r;
	return true;
}

static bool hashlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

	/* Check for overflow. */
	if (info->cfg.burst == 0 ||
	    user2credits(info->cfg.avg * info->cfg.burst) <
	    user2credits(info->cfg.avg)) {
		printk(KERN_ERR "xt_hashlimit: overflow, try lower: %u/%u\n",
		       info->cfg.avg, info->cfg.burst);
		return false;
	}
	if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
		return false;
	if (info->name[sizeof(info->name)-1] != '\0')
		return false;
	if (par->match->family == NFPROTO_IPV4) {
		if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
			return false;
	} else {
		if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
			return false;
	}

	/* This is the best we've got: We cannot release and re-grab lock,
	 * since checkentry() is called before x_tables.c grabs xt_mutex.
	 * We also cannot grab the hashtable spinlock, since htable_create will
	 * call vmalloc, and that can sleep.  And we cannot just re-search
	 * the list of htable's in htable_create(), since then we would
	 * create duplicate proc files. -HW */
	mutex_lock(&hlimit_mutex);
	info->hinfo = htable_find_get(info->name, par->match->family);
	if (!info->hinfo && htable_create(info, par->match->family) != 0) {
		mutex_unlock(&hlimit_mutex);
		return false;
	}
	mutex_unlock(&hlimit_mutex);
	return true;
}

static void
hashlimit_mt_destroy_v0(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_info *r = par->matchinfo;

	htable_put(r->hinfo);
}

static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

	htable_put(info->hinfo);
}

#ifdef CONFIG_COMPAT
struct compat_xt_hashlimit_info {
	char name[IFNAMSIZ];
	struct hashlimit_cfg cfg;
	compat_uptr_t hinfo;
	compat_uptr_t master;
};

static void hashlimit_mt_compat_from_user(void *dst, void *src)
{
	int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

	memcpy(dst, src, off);
	memset(dst + off, 0, sizeof(struct compat_xt_hashlimit_info) - off);
}

static int hashlimit_mt_compat_to_user(void __user *dst, void *src)
{
	int off = offsetof(struct compat_xt_hashlimit_info, hinfo);

	return copy_to_user(dst, src, off) ? -EFAULT : 0;
}
#endif

static struct xt_match hashlimit_mt_reg[] __read_mostly = {
	{
		.name		= "hashlimit",
		.revision	= 0,
		.family		= NFPROTO_IPV4,
		.match		= hashlimit_mt_v0,
		.matchsize	= sizeof(struct xt_hashlimit_info),
#ifdef CONFIG_COMPAT
		.compatsize	= sizeof(struct compat_xt_hashlimit_info),
		.compat_from_user = hashlimit_mt_compat_from_user,
		.compat_to_user	= hashlimit_mt_compat_to_user,
#endif
		.checkentry	= hashlimit_mt_check_v0,
		.destroy	= hashlimit_mt_destroy_v0,
		.me		= THIS_MODULE
	},
	{
		.name           = "hashlimit",
		.revision       = 1,
		.family         = NFPROTO_IPV4,
		.match          = hashlimit_mt,
		.matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry     = hashlimit_mt_check,
		.destroy        = hashlimit_mt_destroy,
		.me             = THIS_MODULE,
	},
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	{
		.name		= "hashlimit",
		.family		= NFPROTO_IPV6,
		.match		= hashlimit_mt_v0,
		.matchsize	= sizeof(struct xt_hashlimit_info),
#ifdef CONFIG_COMPAT
		.compatsize	= sizeof(struct compat_xt_hashlimit_info),
		.compat_from_user = hashlimit_mt_compat_from_user,
		.compat_to_user	= hashlimit_mt_compat_to_user,
#endif
		.checkentry	= hashlimit_mt_check_v0,
		.destroy	= hashlimit_mt_destroy_v0,
		.me		= THIS_MODULE
	},
	{
		.name           = "hashlimit",
		.revision       = 1,
		.family         = NFPROTO_IPV6,
		.match          = hashlimit_mt,
		.matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry     = hashlimit_mt_check,
		.destroy        = hashlimit_mt_destroy,
		.me             = THIS_MODULE,
	},
#endif
};

/* PROC stuff */
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
	__acquires(htable->lock)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket;

	spin_lock_bh(&htable->lock);
	if (*pos >= htable->cfg.size)
		return NULL;

	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);

	*bucket = *pos;
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket = (unsigned int *)v;

	*pos = ++(*bucket);
	if (*pos >= htable->cfg.size) {
		kfree(v);
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
	__releases(htable->lock)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket = (unsigned int *)v;

	kfree(bucket);
	spin_unlock_bh(&htable->lock);
}

static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
				   struct seq_file *s)
{
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies);

	switch (family) {
	case NFPROTO_IPV4:
		return seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
				 (long)(ent->expires - jiffies)/HZ,
				 &ent->dst.ip.src,
				 ntohs(ent->dst.src_port),
				 &ent->dst.ip.dst,
				 ntohs(ent->dst.dst_port),
				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
				 ent->rateinfo.cost);
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	case NFPROTO_IPV6:
		return seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
				 (long)(ent->expires - jiffies)/HZ,
				 &ent->dst.ip6.src,
				 ntohs(ent->dst.src_port),
				 &ent->dst.ip6.dst,
				 ntohs(ent->dst.dst_port),
				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
				 ent->rateinfo.cost);
#endif
	default:
		BUG();
		return 0;
	}
}

static int dl_seq_show(struct seq_file *s, void *v)
{
	struct proc_dir_entry *pde = s->private;
	struct xt_hashlimit_htable *htable = pde->data;
	unsigned int *bucket = (unsigned int *)v;
	struct dsthash_ent *ent;
	struct hlist_node *pos;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
			if (dl_seq_real_show(ent, htable->family, s))
				return 1;
	}
	return 0;
}

static const struct seq_operations dl_seq_ops = {
	.start = dl_seq_start,
	.next  = dl_seq_next,
	.stop  = dl_seq_stop,
	.show  = dl_seq_show
};

static int dl_proc_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dl_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode);
	}
	return ret;
}

static const struct file_operations dl_file_ops = {
	.owner   = THIS_MODULE,
	.open    = dl_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static int __init hashlimit_mt_init(void)
{
	int err;

	err = xt_register_matches(hashlimit_mt_reg,
	      ARRAY_SIZE(hashlimit_mt_reg));
	if (err < 0)
		goto err1;

	err = -ENOMEM;
	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
					    sizeof(struct dsthash_ent), 0, 0,
					    NULL);
	if (!hashlimit_cachep) {
		printk(KERN_ERR "xt_hashlimit: unable to create slab cache\n");
		goto err2;
	}
	hashlimit_procdir4 = proc_mkdir("ipt_hashlimit", init_net.proc_net);
	if (!hashlimit_procdir4) {
		printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
				"entry\n");
		goto err3;
	}
	err = 0;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	hashlimit_procdir6 = proc_mkdir("ip6t_hashlimit", init_net.proc_net);
	if (!hashlimit_procdir6) {
		printk(KERN_ERR "xt_hashlimit: unable to create proc dir "
				"entry\n");
		err = -ENOMEM;
	}
#endif
	if (!err)
		return 0;
	remove_proc_entry("ipt_hashlimit", init_net.proc_net);
err3:
	kmem_cache_destroy(hashlimit_cachep);
err2:
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
	return err;
}

static void __exit hashlimit_mt_exit(void)
{
	remove_proc_entry("ipt_hashlimit", init_net.proc_net);
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	remove_proc_entry("ip6t_hashlimit", init_net.proc_net);
#endif
	kmem_cache_destroy(hashlimit_cachep);
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
}

module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);