1 /*
2  *  ebtables
3  *
4  *  Author:
5  *  Bart De Schuymer		<bdschuym@pandora.be>
6  *
7  *  ebtables.c,v 2.0, July, 2002
8  *
9  *  This code is strongly inspired by the iptables code, which is
10  *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11  *
12  *  This program is free software; you can redistribute it and/or
13  *  modify it under the terms of the GNU General Public License
14  *  as published by the Free Software Foundation; either version
15  *  2 of the License, or (at your option) any later version.
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <linux/audit.h>
30 #include <net/sock.h>
31 /* needed for logical [in,out]-dev filtering */
32 #include "../br_private.h"
33 
34 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
35 					 "report to author: "format, ## args)
36 /* #define BUGPRINT(format, args...) */
37 
38 /*
39  * Each cpu has its own set of counters, so there is no need for a write_lock
40  * in the softirq.
41  * For reading or updating the counters, the user context needs to
42  * take a write_lock.
43  */
44 
45 /* The size of each set of counters is altered to get cache alignment */
46 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
47 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
48 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
49    COUNTER_OFFSET(n) * cpu))
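/*
 * Worked example of the per-cpu counter layout (illustrative only, the
 * concrete numbers are arch-dependent): assuming SMP_CACHE_BYTES == 64
 * and a 16-byte struct ebt_counter, a table with n == 10 entries needs
 * 160 bytes of counters per cpu, which SMP_ALIGN() rounds up to 192.
 * COUNTER_BASE(c, 10, 2) then points at (char *)c + 2 * 192, so every
 * cpu updates its own cache-aligned slice of the counter array.
 */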
50 
51 
52 
53 static DEFINE_MUTEX(ebt_mutex);
54 
55 #ifdef CONFIG_COMPAT
56 static void ebt_standard_compat_from_user(void *dst, const void *src)
57 {
58 	int v = *(compat_int_t *)src;
59 
60 	if (v >= 0)
61 		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62 	memcpy(dst, &v, sizeof(v));
63 }
64 
65 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66 {
67 	compat_int_t cv = *(int *)src;
68 
69 	if (cv >= 0)
70 		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72 }
73 #endif
74 
75 
76 static struct xt_target ebt_standard_target = {
77 	.name       = "standard",
78 	.revision   = 0,
79 	.family     = NFPROTO_BRIDGE,
80 	.targetsize = sizeof(int),
81 #ifdef CONFIG_COMPAT
82 	.compatsize = sizeof(compat_int_t),
83 	.compat_from_user = ebt_standard_compat_from_user,
84 	.compat_to_user =  ebt_standard_compat_to_user,
85 #endif
86 };
87 
88 static inline int
89 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
90 	       struct xt_action_param *par)
91 {
92 	par->target   = w->u.watcher;
93 	par->targinfo = w->data;
94 	w->u.watcher->target(skb, par);
95 	/* watchers don't give a verdict */
96 	return 0;
97 }
98 
99 static inline int
100 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
101 	     struct xt_action_param *par)
102 {
103 	par->match     = m->u.match;
104 	par->matchinfo = m->data;
105 	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
106 }
107 
108 static inline int
109 ebt_dev_check(const char *entry, const struct net_device *device)
110 {
111 	int i = 0;
112 	const char *devname;
113 
114 	if (*entry == '\0')
115 		return 0;
116 	if (!device)
117 		return 1;
118 	devname = device->name;
119 	/* 1 is the wildcard token */
120 	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
121 		i++;
122 	return devname[i] != entry[i] && entry[i] != 1;
123 }
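/*
 * ebt_dev_check() returns 0 when the device matches the rule and
 * nonzero when it does not.  An empty entry string matches anything
 * (including a NULL device); a non-empty entry never matches a NULL
 * device; and a byte value of 1 in the entry acts as a wildcard
 * terminator, so the entry { 'e', 't', 'h', 1 } matches "eth0",
 * "eth1", "eth42", ... (this is how a trailing '+' wildcard from the
 * ebtables command line is expected to be encoded by userspace).
 */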
124 
125 #define FWINV2(bool, invflg) ((bool) ^ !!(e->invflags & invflg))
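/*
 * FWINV2() folds in the per-field inversion flags: with e.g. EBT_IIN
 * set in e->invflags (an inverted in-interface match on the ebtables
 * command line), FWINV2(x, EBT_IIN) yields the negation of the 0/1
 * condition x; without the flag it is just x.
 */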
126 /* process standard matches */
127 static inline int
128 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
129                 const struct net_device *in, const struct net_device *out)
130 {
131 	const struct ethhdr *h = eth_hdr(skb);
132 	const struct net_bridge_port *p;
133 	__be16 ethproto;
134 	int verdict, i;
135 
136 	if (vlan_tx_tag_present(skb))
137 		ethproto = htons(ETH_P_8021Q);
138 	else
139 		ethproto = h->h_proto;
140 
141 	if (e->bitmask & EBT_802_3) {
142 		if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
143 			return 1;
144 	} else if (!(e->bitmask & EBT_NOPROTO) &&
145 	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
146 		return 1;
147 
148 	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
149 		return 1;
150 	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
151 		return 1;
152 	/* rcu_read_lock()ed by nf_hook_slow */
153 	if (in && (p = br_port_get_rcu(in)) != NULL &&
154 	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
155 		return 1;
156 	if (out && (p = br_port_get_rcu(out)) != NULL &&
157 	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
158 		return 1;
159 
160 	if (e->bitmask & EBT_SOURCEMAC) {
161 		verdict = 0;
162 		for (i = 0; i < 6; i++)
163 			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
164 			   e->sourcemsk[i];
165 		if (FWINV2(verdict != 0, EBT_ISOURCE))
166 			return 1;
167 	}
168 	if (e->bitmask & EBT_DESTMAC) {
169 		verdict = 0;
170 		for (i = 0; i < 6; i++)
171 			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
172 			   e->destmsk[i];
173 		if (FWINV2(verdict != 0, EBT_IDEST))
174 			return 1;
175 	}
176 	return 0;
177 }
178 
179 static inline __pure
180 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
181 {
182 	return (void *)entry + entry->next_offset;
183 }
184 
185 /* Do some firewalling */
186 unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
187    const struct net_device *in, const struct net_device *out,
188    struct ebt_table *table)
189 {
190 	int i, nentries;
191 	struct ebt_entry *point;
192 	struct ebt_counter *counter_base, *cb_base;
193 	const struct ebt_entry_target *t;
194 	int verdict, sp = 0;
195 	struct ebt_chainstack *cs;
196 	struct ebt_entries *chaininfo;
197 	const char *base;
198 	const struct ebt_table_info *private;
199 	struct xt_action_param acpar;
200 
201 	acpar.family  = NFPROTO_BRIDGE;
202 	acpar.in      = in;
203 	acpar.out     = out;
204 	acpar.hotdrop = false;
205 	acpar.hooknum = hook;
206 
207 	read_lock_bh(&table->lock);
208 	private = table->private;
209 	cb_base = COUNTER_BASE(private->counters, private->nentries,
210 	   smp_processor_id());
211 	if (private->chainstack)
212 		cs = private->chainstack[smp_processor_id()];
213 	else
214 		cs = NULL;
215 	chaininfo = private->hook_entry[hook];
216 	nentries = private->hook_entry[hook]->nentries;
217 	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
218 	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
219 	/* base for chain jumps */
220 	base = private->entries;
221 	i = 0;
222 	while (i < nentries) {
223 		if (ebt_basic_match(point, skb, in, out))
224 			goto letscontinue;
225 
226 		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
227 			goto letscontinue;
228 		if (acpar.hotdrop) {
229 			read_unlock_bh(&table->lock);
230 			return NF_DROP;
231 		}
232 
233 		/* increase counter */
234 		(*(counter_base + i)).pcnt++;
235 		(*(counter_base + i)).bcnt += skb->len;
236 
237 		/* these should only watch: not modify, nor tell us
238 		   what to do with the packet */
239 		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
240 
241 		t = (struct ebt_entry_target *)
242 		   (((char *)point) + point->target_offset);
243 		/* standard target */
244 		if (!t->u.target->target)
245 			verdict = ((struct ebt_standard_target *)t)->verdict;
246 		else {
247 			acpar.target   = t->u.target;
248 			acpar.targinfo = t->data;
249 			verdict = t->u.target->target(skb, &acpar);
250 		}
251 		if (verdict == EBT_ACCEPT) {
252 			read_unlock_bh(&table->lock);
253 			return NF_ACCEPT;
254 		}
255 		if (verdict == EBT_DROP) {
256 			read_unlock_bh(&table->lock);
257 			return NF_DROP;
258 		}
259 		if (verdict == EBT_RETURN) {
260 letsreturn:
261 #ifdef CONFIG_NETFILTER_DEBUG
262 			if (sp == 0) {
263 				BUGPRINT("RETURN on base chain");
264 				/* act like this is EBT_CONTINUE */
265 				goto letscontinue;
266 			}
267 #endif
268 			sp--;
269 			/* put all the local variables right */
270 			i = cs[sp].n;
271 			chaininfo = cs[sp].chaininfo;
272 			nentries = chaininfo->nentries;
273 			point = cs[sp].e;
274 			counter_base = cb_base +
275 			   chaininfo->counter_offset;
276 			continue;
277 		}
278 		if (verdict == EBT_CONTINUE)
279 			goto letscontinue;
280 #ifdef CONFIG_NETFILTER_DEBUG
281 		if (verdict < 0) {
282 			BUGPRINT("bogus standard verdict\n");
283 			read_unlock_bh(&table->lock);
284 			return NF_DROP;
285 		}
286 #endif
287 		/* jump to a udc */
288 		cs[sp].n = i + 1;
289 		cs[sp].chaininfo = chaininfo;
290 		cs[sp].e = ebt_next_entry(point);
291 		i = 0;
292 		chaininfo = (struct ebt_entries *) (base + verdict);
293 #ifdef CONFIG_NETFILTER_DEBUG
294 		if (chaininfo->distinguisher) {
295 			BUGPRINT("jump to non-chain\n");
296 			read_unlock_bh(&table->lock);
297 			return NF_DROP;
298 		}
299 #endif
300 		nentries = chaininfo->nentries;
301 		point = (struct ebt_entry *)chaininfo->data;
302 		counter_base = cb_base + chaininfo->counter_offset;
303 		sp++;
304 		continue;
305 letscontinue:
306 		point = ebt_next_entry(point);
307 		i++;
308 	}
309 
310 	/* I actually like this :) */
311 	if (chaininfo->policy == EBT_RETURN)
312 		goto letsreturn;
313 	if (chaininfo->policy == EBT_ACCEPT) {
314 		read_unlock_bh(&table->lock);
315 		return NF_ACCEPT;
316 	}
317 	read_unlock_bh(&table->lock);
318 	return NF_DROP;
319 }
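/*
 * Minimal caller sketch (not part of this file): the per-table modules
 * such as ebtable_filter.c register netfilter hooks whose handlers
 * simply dispatch to ebt_do_table() with their own table.  Roughly,
 * glossing over nf_hookfn prototype differences between kernel
 * versions:
 *
 *	static unsigned int
 *	ebt_hook(const struct nf_hook_ops *ops, struct sk_buff *skb,
 *		 const struct net_device *in, const struct net_device *out,
 *		 int (*okfn)(struct sk_buff *))
 *	{
 *		return ebt_do_table(ops->hooknum, skb, in, out,
 *				    dev_net(in ? in : out)->xt.frame_filter);
 *	}
 *
 * The NF_ACCEPT/NF_DROP verdict computed above feeds straight back
 * into the bridge netfilter machinery.
 */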
320 
321 /* If it succeeds, returns element and locks mutex */
322 static inline void *
323 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
324    struct mutex *mutex)
325 {
326 	struct {
327 		struct list_head list;
328 		char name[EBT_FUNCTION_MAXNAMELEN];
329 	} *e;
330 
331 	mutex_lock(mutex);
332 	list_for_each_entry(e, head, list) {
333 		if (strcmp(e->name, name) == 0)
334 			return e;
335 	}
336 	*error = -ENOENT;
337 	mutex_unlock(mutex);
338 	return NULL;
339 }
340 
341 static void *
342 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
343    int *error, struct mutex *mutex)
344 {
345 	return try_then_request_module(
346 			find_inlist_lock_noload(head, name, error, mutex),
347 			"%s%s", prefix, name);
348 }
349 
350 static inline struct ebt_table *
351 find_table_lock(struct net *net, const char *name, int *error,
352 		struct mutex *mutex)
353 {
354 	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
355 				"ebtable_", error, mutex);
356 }
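/*
 * Example: find_table_lock(net, "filter", &ret, &ebt_mutex) returns the
 * "filter" table with ebt_mutex held; if no such table is registered,
 * try_then_request_module() first attempts
 * request_module("ebtable_filter") (the "ebtable_" prefix plus the
 * table name) before giving up with -ENOENT.
 */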
357 
358 static inline int
359 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
360 		unsigned int *cnt)
361 {
362 	const struct ebt_entry *e = par->entryinfo;
363 	struct xt_match *match;
364 	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
365 	int ret;
366 
367 	if (left < sizeof(struct ebt_entry_match) ||
368 	    left - sizeof(struct ebt_entry_match) < m->match_size)
369 		return -EINVAL;
370 
371 	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
372 	if (IS_ERR(match))
373 		return PTR_ERR(match);
374 	m->u.match = match;
375 
376 	par->match     = match;
377 	par->matchinfo = m->data;
378 	ret = xt_check_match(par, m->match_size,
379 	      e->ethproto, e->invflags & EBT_IPROTO);
380 	if (ret < 0) {
381 		module_put(match->me);
382 		return ret;
383 	}
384 
385 	(*cnt)++;
386 	return 0;
387 }
388 
389 static inline int
390 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
391 		  unsigned int *cnt)
392 {
393 	const struct ebt_entry *e = par->entryinfo;
394 	struct xt_target *watcher;
395 	size_t left = ((char *)e + e->target_offset) - (char *)w;
396 	int ret;
397 
398 	if (left < sizeof(struct ebt_entry_watcher) ||
399 	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
400 		return -EINVAL;
401 
402 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
403 	if (IS_ERR(watcher))
404 		return PTR_ERR(watcher);
405 	w->u.watcher = watcher;
406 
407 	par->target   = watcher;
408 	par->targinfo = w->data;
409 	ret = xt_check_target(par, w->watcher_size,
410 	      e->ethproto, e->invflags & EBT_IPROTO);
411 	if (ret < 0) {
412 		module_put(watcher->me);
413 		return ret;
414 	}
415 
416 	(*cnt)++;
417 	return 0;
418 }
419 
420 static int ebt_verify_pointers(const struct ebt_replace *repl,
421 			       struct ebt_table_info *newinfo)
422 {
423 	unsigned int limit = repl->entries_size;
424 	unsigned int valid_hooks = repl->valid_hooks;
425 	unsigned int offset = 0;
426 	int i;
427 
428 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
429 		newinfo->hook_entry[i] = NULL;
430 
431 	newinfo->entries_size = repl->entries_size;
432 	newinfo->nentries = repl->nentries;
433 
434 	while (offset < limit) {
435 		size_t left = limit - offset;
436 		struct ebt_entry *e = (void *)newinfo->entries + offset;
437 
438 		if (left < sizeof(unsigned int))
439 			break;
440 
441 		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
442 			if ((valid_hooks & (1 << i)) == 0)
443 				continue;
444 			if ((char __user *)repl->hook_entry[i] ==
445 			     repl->entries + offset)
446 				break;
447 		}
448 
449 		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
450 			if (e->bitmask != 0) {
451 				/* we make userspace set this right,
452 				   so there is no misunderstanding */
453 				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
454 					 "in distinguisher\n");
455 				return -EINVAL;
456 			}
457 			if (i != NF_BR_NUMHOOKS)
458 				newinfo->hook_entry[i] = (struct ebt_entries *)e;
459 			if (left < sizeof(struct ebt_entries))
460 				break;
461 			offset += sizeof(struct ebt_entries);
462 		} else {
463 			if (left < sizeof(struct ebt_entry))
464 				break;
465 			if (left < e->next_offset)
466 				break;
467 			if (e->next_offset < sizeof(struct ebt_entry))
468 				return -EINVAL;
469 			offset += e->next_offset;
470 		}
471 	}
472 	if (offset != limit) {
473 		BUGPRINT("entries_size too small\n");
474 		return -EINVAL;
475 	}
476 
477 	/* check if all valid hooks have a chain */
478 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
479 		if (!newinfo->hook_entry[i] &&
480 		   (valid_hooks & (1 << i))) {
481 			BUGPRINT("Valid hook without chain\n");
482 			return -EINVAL;
483 		}
484 	}
485 	return 0;
486 }
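/*
 * What ebt_verify_pointers() establishes about the layout: the entries
 * blob parses as a sequence of struct ebt_entries chain headers
 * (bitmask == 0) and struct ebt_entry rules (EBT_ENTRY_OR_ENTRIES set,
 * sane next_offset), the userspace hook_entry[] pointers land exactly
 * on chain headers, the offsets add up to entries_size, and every hook
 * bit set in valid_hooks has a chain.  Per-chain entry counts are
 * checked later, in ebt_check_entry_size_and_hooks().
 */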
487 
488 /*
489  * this one is very careful, as it is the first function
490  * to parse the userspace data
491  */
492 static inline int
493 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
494    const struct ebt_table_info *newinfo,
495    unsigned int *n, unsigned int *cnt,
496    unsigned int *totalcnt, unsigned int *udc_cnt)
497 {
498 	int i;
499 
500 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
501 		if ((void *)e == (void *)newinfo->hook_entry[i])
502 			break;
503 	}
504 	/* beginning of a new chain
505 	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
506 	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
507 		/* this checks if the previous chain has as many entries
508 		   as it said it has */
509 		if (*n != *cnt) {
510 			BUGPRINT("nentries does not equal the nr of entries "
511 				 "in the chain\n");
512 			return -EINVAL;
513 		}
514 		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
515 		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
516 			/* only RETURN from udc */
517 			if (i != NF_BR_NUMHOOKS ||
518 			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
519 				BUGPRINT("bad policy\n");
520 				return -EINVAL;
521 			}
522 		}
523 		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
524 			(*udc_cnt)++;
525 		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
526 			BUGPRINT("counter_offset != totalcnt");
527 			return -EINVAL;
528 		}
529 		*n = ((struct ebt_entries *)e)->nentries;
530 		*cnt = 0;
531 		return 0;
532 	}
533 	/* a plain old entry, heh */
534 	if (sizeof(struct ebt_entry) > e->watchers_offset ||
535 	   e->watchers_offset > e->target_offset ||
536 	   e->target_offset >= e->next_offset) {
537 		BUGPRINT("entry offsets not in right order\n");
538 		return -EINVAL;
539 	}
540 	/* this is not checked anywhere else */
541 	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
542 		BUGPRINT("target size too small\n");
543 		return -EINVAL;
544 	}
545 	(*cnt)++;
546 	(*totalcnt)++;
547 	return 0;
548 }
549 
550 struct ebt_cl_stack
551 {
552 	struct ebt_chainstack cs;
553 	int from;
554 	unsigned int hookmask;
555 };
556 
557 /*
558  * we need these positions to check that the jumps to a different part of the
559  * entries is a jump to the beginning of a new chain.
560  */
561 static inline int
562 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
563    unsigned int *n, struct ebt_cl_stack *udc)
564 {
565 	int i;
566 
567 	/* we're only interested in chain starts */
568 	if (e->bitmask)
569 		return 0;
570 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
571 		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
572 			break;
573 	}
574 	/* only care about udc */
575 	if (i != NF_BR_NUMHOOKS)
576 		return 0;
577 
578 	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
579 	/* these initialisations are depended on later in check_chainloops() */
580 	udc[*n].cs.n = 0;
581 	udc[*n].hookmask = 0;
582 
583 	(*n)++;
584 	return 0;
585 }
586 
587 static inline int
588 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
589 {
590 	struct xt_mtdtor_param par;
591 
592 	if (i && (*i)-- == 0)
593 		return 1;
594 
595 	par.net       = net;
596 	par.match     = m->u.match;
597 	par.matchinfo = m->data;
598 	par.family    = NFPROTO_BRIDGE;
599 	if (par.match->destroy != NULL)
600 		par.match->destroy(&par);
601 	module_put(par.match->me);
602 	return 0;
603 }
604 
605 static inline int
606 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
607 {
608 	struct xt_tgdtor_param par;
609 
610 	if (i && (*i)-- == 0)
611 		return 1;
612 
613 	par.net      = net;
614 	par.target   = w->u.watcher;
615 	par.targinfo = w->data;
616 	par.family   = NFPROTO_BRIDGE;
617 	if (par.target->destroy != NULL)
618 		par.target->destroy(&par);
619 	module_put(par.target->me);
620 	return 0;
621 }
622 
623 static inline int
624 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
625 {
626 	struct xt_tgdtor_param par;
627 	struct ebt_entry_target *t;
628 
629 	if (e->bitmask == 0)
630 		return 0;
631 	/* we're done */
632 	if (cnt && (*cnt)-- == 0)
633 		return 1;
634 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
635 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
636 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
637 
638 	par.net      = net;
639 	par.target   = t->u.target;
640 	par.targinfo = t->data;
641 	par.family   = NFPROTO_BRIDGE;
642 	if (par.target->destroy != NULL)
643 		par.target->destroy(&par);
644 	module_put(par.target->me);
645 	return 0;
646 }
647 
648 static inline int
649 ebt_check_entry(struct ebt_entry *e, struct net *net,
650    const struct ebt_table_info *newinfo,
651    const char *name, unsigned int *cnt,
652    struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
653 {
654 	struct ebt_entry_target *t;
655 	struct xt_target *target;
656 	unsigned int i, j, hook = 0, hookmask = 0;
657 	size_t gap;
658 	int ret;
659 	struct xt_mtchk_param mtpar;
660 	struct xt_tgchk_param tgpar;
661 
662 	/* don't mess with the struct ebt_entries */
663 	if (e->bitmask == 0)
664 		return 0;
665 
666 	if (e->bitmask & ~EBT_F_MASK) {
667 		BUGPRINT("Unknown flag for bitmask\n");
668 		return -EINVAL;
669 	}
670 	if (e->invflags & ~EBT_INV_MASK) {
671 		BUGPRINT("Unknown flag for inv bitmask\n");
672 		return -EINVAL;
673 	}
674 	if ((e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3)) {
675 		BUGPRINT("NOPROTO & 802_3 not allowed\n");
676 		return -EINVAL;
677 	}
678 	/* what hook do we belong to? */
679 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
680 		if (!newinfo->hook_entry[i])
681 			continue;
682 		if ((char *)newinfo->hook_entry[i] < (char *)e)
683 			hook = i;
684 		else
685 			break;
686 	}
687 	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
688 	   a base chain */
689 	if (i < NF_BR_NUMHOOKS)
690 		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
691 	else {
692 		for (i = 0; i < udc_cnt; i++)
693 			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
694 				break;
695 		if (i == 0)
696 			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
697 		else
698 			hookmask = cl_s[i - 1].hookmask;
699 	}
700 	i = 0;
701 
702 	mtpar.net	= tgpar.net       = net;
703 	mtpar.table     = tgpar.table     = name;
704 	mtpar.entryinfo = tgpar.entryinfo = e;
705 	mtpar.hook_mask = tgpar.hook_mask = hookmask;
706 	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
707 	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
708 	if (ret != 0)
709 		goto cleanup_matches;
710 	j = 0;
711 	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
712 	if (ret != 0)
713 		goto cleanup_watchers;
714 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
715 	gap = e->next_offset - e->target_offset;
716 
717 	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
718 	if (IS_ERR(target)) {
719 		ret = PTR_ERR(target);
720 		goto cleanup_watchers;
721 	}
722 
723 	t->u.target = target;
724 	if (t->u.target == &ebt_standard_target) {
725 		if (gap < sizeof(struct ebt_standard_target)) {
726 			BUGPRINT("Standard target size too big\n");
727 			ret = -EFAULT;
728 			goto cleanup_watchers;
729 		}
730 		if (((struct ebt_standard_target *)t)->verdict <
731 		   -NUM_STANDARD_TARGETS) {
732 			BUGPRINT("Invalid standard target\n");
733 			ret = -EFAULT;
734 			goto cleanup_watchers;
735 		}
736 	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
737 		module_put(t->u.target->me);
738 		ret = -EFAULT;
739 		goto cleanup_watchers;
740 	}
741 
742 	tgpar.target   = target;
743 	tgpar.targinfo = t->data;
744 	ret = xt_check_target(&tgpar, t->target_size,
745 	      e->ethproto, e->invflags & EBT_IPROTO);
746 	if (ret < 0) {
747 		module_put(target->me);
748 		goto cleanup_watchers;
749 	}
750 	(*cnt)++;
751 	return 0;
752 cleanup_watchers:
753 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
754 cleanup_matches:
755 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
756 	return ret;
757 }
758 
759 /*
760  * checks for loops and sets the hook mask for udc
761  * the hook mask for udc tells us from which base chains the udc can be
762  * accessed. This mask is a parameter to the check() functions of the extensions
763  */
764 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
765    unsigned int udc_cnt, unsigned int hooknr, char *base)
766 {
767 	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
768 	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
769 	const struct ebt_entry_target *t;
770 
771 	while (pos < nentries || chain_nr != -1) {
772 		/* end of udc, go back one 'recursion' step */
773 		if (pos == nentries) {
774 			/* put back values of the time when this chain was called */
775 			e = cl_s[chain_nr].cs.e;
776 			if (cl_s[chain_nr].from != -1)
777 				nentries =
778 				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
779 			else
780 				nentries = chain->nentries;
781 			pos = cl_s[chain_nr].cs.n;
782 			/* make sure we won't see a loop that isn't one */
783 			cl_s[chain_nr].cs.n = 0;
784 			chain_nr = cl_s[chain_nr].from;
785 			if (pos == nentries)
786 				continue;
787 		}
788 		t = (struct ebt_entry_target *)
789 		   (((char *)e) + e->target_offset);
790 		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
791 			goto letscontinue;
792 		if (e->target_offset + sizeof(struct ebt_standard_target) >
793 		   e->next_offset) {
794 			BUGPRINT("Standard target size too big\n");
795 			return -1;
796 		}
797 		verdict = ((struct ebt_standard_target *)t)->verdict;
798 		if (verdict >= 0) { /* jump to another chain */
799 			struct ebt_entries *hlp2 =
800 			   (struct ebt_entries *)(base + verdict);
801 			for (i = 0; i < udc_cnt; i++)
802 				if (hlp2 == cl_s[i].cs.chaininfo)
803 					break;
804 			/* bad destination or loop */
805 			if (i == udc_cnt) {
806 				BUGPRINT("bad destination\n");
807 				return -1;
808 			}
809 			if (cl_s[i].cs.n) {
810 				BUGPRINT("loop\n");
811 				return -1;
812 			}
813 			if (cl_s[i].hookmask & (1 << hooknr))
814 				goto letscontinue;
815 			/* this can't be 0, so the loop test is correct */
816 			cl_s[i].cs.n = pos + 1;
817 			pos = 0;
818 			cl_s[i].cs.e = ebt_next_entry(e);
819 			e = (struct ebt_entry *)(hlp2->data);
820 			nentries = hlp2->nentries;
821 			cl_s[i].from = chain_nr;
822 			chain_nr = i;
823 			/* this udc is accessible from the base chain for hooknr */
824 			cl_s[i].hookmask |= (1 << hooknr);
825 			continue;
826 		}
827 letscontinue:
828 		e = ebt_next_entry(e);
829 		pos++;
830 	}
831 	return 0;
832 }
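/*
 * Loop detection example: suppose base chain hooknr jumps to
 * user-defined chain A, A jumps to B, and B jumps back to A.  Entering
 * A sets cl_s[A].cs.n to a nonzero "return position"; when the walk
 * later follows B's jump back into A, cl_s[A].cs.n is still nonzero,
 * the "loop" branch above triggers and -1 is returned, so
 * translate_table() rejects the ruleset with -EINVAL.
 */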
833 
834 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
835 static int translate_table(struct net *net, const char *name,
836 			   struct ebt_table_info *newinfo)
837 {
838 	unsigned int i, j, k, udc_cnt;
839 	int ret;
840 	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
841 
842 	i = 0;
843 	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
844 		i++;
845 	if (i == NF_BR_NUMHOOKS) {
846 		BUGPRINT("No valid hooks specified\n");
847 		return -EINVAL;
848 	}
849 	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
850 		BUGPRINT("Chains don't start at beginning\n");
851 		return -EINVAL;
852 	}
853 	/* make sure the chains appear in the same order as their
854 	   corresponding hooks */
855 	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
856 		if (!newinfo->hook_entry[j])
857 			continue;
858 		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
859 			BUGPRINT("Hook order must be followed\n");
860 			return -EINVAL;
861 		}
862 		i = j;
863 	}
864 
865 	/* do some early checkings and initialize some things */
866 	i = 0; /* holds the expected nr. of entries for the chain */
867 	j = 0; /* holds the up to now counted entries for the chain */
868 	k = 0; /* holds the total nr. of entries, should equal
869 		  newinfo->nentries afterwards */
870 	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
871 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
872 	   ebt_check_entry_size_and_hooks, newinfo,
873 	   &i, &j, &k, &udc_cnt);
874 
875 	if (ret != 0)
876 		return ret;
877 
878 	if (i != j) {
879 		BUGPRINT("nentries does not equal the nr of entries in the "
880 			 "(last) chain\n");
881 		return -EINVAL;
882 	}
883 	if (k != newinfo->nentries) {
884 		BUGPRINT("Total nentries is wrong\n");
885 		return -EINVAL;
886 	}
887 
888 	/* get the location of the udc, put them in an array
889 	   while we're at it, allocate the chainstack */
890 	if (udc_cnt) {
891 		/* this will get free'd in do_replace()/ebt_register_table()
892 		   if an error occurs */
893 		newinfo->chainstack =
894 			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
895 		if (!newinfo->chainstack)
896 			return -ENOMEM;
897 		for_each_possible_cpu(i) {
898 			newinfo->chainstack[i] =
899 			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
900 			if (!newinfo->chainstack[i]) {
901 				while (i)
902 					vfree(newinfo->chainstack[--i]);
903 				vfree(newinfo->chainstack);
904 				newinfo->chainstack = NULL;
905 				return -ENOMEM;
906 			}
907 		}
908 
909 		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
910 		if (!cl_s)
911 			return -ENOMEM;
912 		i = 0; /* the i'th udc */
913 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
914 		   ebt_get_udc_positions, newinfo, &i, cl_s);
915 		/* sanity check */
916 		if (i != udc_cnt) {
917 			BUGPRINT("i != udc_cnt\n");
918 			vfree(cl_s);
919 			return -EFAULT;
920 		}
921 	}
922 
923 	/* Check for loops */
924 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
925 		if (newinfo->hook_entry[i])
926 			if (check_chainloops(newinfo->hook_entry[i],
927 			   cl_s, udc_cnt, i, newinfo->entries)) {
928 				vfree(cl_s);
929 				return -EINVAL;
930 			}
931 
932 	/* we now know the following (along with E=mc²):
933 	   - the nr of entries in each chain is right
934 	   - the size of the allocated space is right
935 	   - all valid hooks have a corresponding chain
936 	   - there are no loops
937 	   - wrong data can still be on the level of a single entry
938 	   - could be there are jumps to places that are not the
939 	     beginning of a chain. This can only occur in chains that
940 	     are not accessible from any base chains, so we don't care. */
941 
942 	/* used to know what we need to clean up if something goes wrong */
943 	i = 0;
944 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
945 	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
946 	if (ret != 0) {
947 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
948 				  ebt_cleanup_entry, net, &i);
949 	}
950 	vfree(cl_s);
951 	return ret;
952 }
953 
954 /* called under write_lock */
955 static void get_counters(const struct ebt_counter *oldcounters,
956    struct ebt_counter *counters, unsigned int nentries)
957 {
958 	int i, cpu;
959 	struct ebt_counter *counter_base;
960 
961 	/* counters of cpu 0 */
962 	memcpy(counters, oldcounters,
963 	       sizeof(struct ebt_counter) * nentries);
964 
965 	/* add other counters to those of cpu 0 */
966 	for_each_possible_cpu(cpu) {
967 		if (cpu == 0)
968 			continue;
969 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
970 		for (i = 0; i < nentries; i++) {
971 			counters[i].pcnt += counter_base[i].pcnt;
972 			counters[i].bcnt += counter_base[i].bcnt;
973 		}
974 	}
975 }
976 
977 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
978 			      struct ebt_table_info *newinfo)
979 {
980 	int ret, i;
981 	struct ebt_counter *counterstmp = NULL;
982 	/* used to be able to unlock earlier */
983 	struct ebt_table_info *table;
984 	struct ebt_table *t;
985 
986 	/* the user wants counters back
987 	   the check on the size is done later, when we have the lock */
988 	if (repl->num_counters) {
989 		unsigned long size = repl->num_counters * sizeof(*counterstmp);
990 		counterstmp = vmalloc(size);
991 		if (!counterstmp)
992 			return -ENOMEM;
993 	}
994 
995 	newinfo->chainstack = NULL;
996 	ret = ebt_verify_pointers(repl, newinfo);
997 	if (ret != 0)
998 		goto free_counterstmp;
999 
1000 	ret = translate_table(net, repl->name, newinfo);
1001 
1002 	if (ret != 0)
1003 		goto free_counterstmp;
1004 
1005 	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1006 	if (!t) {
1007 		ret = -ENOENT;
1008 		goto free_iterate;
1009 	}
1010 
1011 	/* the table doesn't like it */
1012 	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1013 		goto free_unlock;
1014 
1015 	if (repl->num_counters && repl->num_counters != t->private->nentries) {
1016 		BUGPRINT("Wrong nr. of counters requested\n");
1017 		ret = -EINVAL;
1018 		goto free_unlock;
1019 	}
1020 
1021 	/* we have the mutex lock, so no danger in reading this pointer */
1022 	table = t->private;
1023 	/* make sure the table can only be rmmod'ed if it contains no rules */
1024 	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1025 		ret = -ENOENT;
1026 		goto free_unlock;
1027 	} else if (table->nentries && !newinfo->nentries)
1028 		module_put(t->me);
1029 	/* we need an atomic snapshot of the counters */
1030 	write_lock_bh(&t->lock);
1031 	if (repl->num_counters)
1032 		get_counters(t->private->counters, counterstmp,
1033 		   t->private->nentries);
1034 
1035 	t->private = newinfo;
1036 	write_unlock_bh(&t->lock);
1037 	mutex_unlock(&ebt_mutex);
1038 	/* So a user can end up with the new chains installed even though the
1039 	   counter copy-out failed (e.g. a bad counters pointer). We accept
1040 	   this because it lets us take the lock only once, and a failed copy
1041 	   never puts the kernel itself in a dangerous state. */
1042 	if (repl->num_counters &&
1043 	   copy_to_user(repl->counters, counterstmp,
1044 	   repl->num_counters * sizeof(struct ebt_counter))) {
1045 		/* Silent error, can't fail, new table is already in place */
1046 		net_warn_ratelimited("ebtables: counters copy to user failed while replacing table\n");
1047 	}
1048 
1049 	/* decrease module count and free resources */
1050 	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1051 			  ebt_cleanup_entry, net, NULL);
1052 
1053 	vfree(table->entries);
1054 	if (table->chainstack) {
1055 		for_each_possible_cpu(i)
1056 			vfree(table->chainstack[i]);
1057 		vfree(table->chainstack);
1058 	}
1059 	vfree(table);
1060 
1061 	vfree(counterstmp);
1062 
1063 #ifdef CONFIG_AUDIT
1064 	if (audit_enabled) {
1065 		struct audit_buffer *ab;
1066 
1067 		ab = audit_log_start(current->audit_context, GFP_KERNEL,
1068 				     AUDIT_NETFILTER_CFG);
1069 		if (ab) {
1070 			audit_log_format(ab, "table=%s family=%u entries=%u",
1071 					 repl->name, AF_BRIDGE, repl->nentries);
1072 			audit_log_end(ab);
1073 		}
1074 	}
1075 #endif
1076 	return ret;
1077 
1078 free_unlock:
1079 	mutex_unlock(&ebt_mutex);
1080 free_iterate:
1081 	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1082 			  ebt_cleanup_entry, net, NULL);
1083 free_counterstmp:
1084 	vfree(counterstmp);
1085 	/* can be initialized in translate_table() */
1086 	if (newinfo->chainstack) {
1087 		for_each_possible_cpu(i)
1088 			vfree(newinfo->chainstack[i]);
1089 		vfree(newinfo->chainstack);
1090 	}
1091 	return ret;
1092 }
1093 
1094 /* replace the table */
1095 static int do_replace(struct net *net, const void __user *user,
1096 		      unsigned int len)
1097 {
1098 	int ret, countersize;
1099 	struct ebt_table_info *newinfo;
1100 	struct ebt_replace tmp;
1101 
1102 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1103 		return -EFAULT;
1104 
1105 	if (len != sizeof(tmp) + tmp.entries_size) {
1106 		BUGPRINT("Wrong len argument\n");
1107 		return -EINVAL;
1108 	}
1109 
1110 	if (tmp.entries_size == 0) {
1111 		BUGPRINT("Entries_size never zero\n");
1112 		return -EINVAL;
1113 	}
1114 	/* overflow check */
1115 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1116 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1117 		return -ENOMEM;
1118 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1119 		return -ENOMEM;
1120 
1121 	tmp.name[sizeof(tmp.name) - 1] = 0;
1122 
1123 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1124 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1125 	if (!newinfo)
1126 		return -ENOMEM;
1127 
1128 	if (countersize)
1129 		memset(newinfo->counters, 0, countersize);
1130 
1131 	newinfo->entries = vmalloc(tmp.entries_size);
1132 	if (!newinfo->entries) {
1133 		ret = -ENOMEM;
1134 		goto free_newinfo;
1135 	}
1136 	if (copy_from_user(
1137 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1138 		BUGPRINT("Couldn't copy entries from userspace\n");
1139 		ret = -EFAULT;
1140 		goto free_entries;
1141 	}
1142 
1143 	ret = do_replace_finish(net, &tmp, newinfo);
1144 	if (ret == 0)
1145 		return ret;
1146 free_entries:
1147 	vfree(newinfo->entries);
1148 free_newinfo:
1149 	vfree(newinfo);
1150 	return ret;
1151 }
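/*
 * Userspace view of EBT_SO_SET_ENTRIES (a rough sketch of how the
 * ebtables binary is expected to drive this interface; the socket
 * family/level details come from the PF_INET nf_sockopt registration
 * and may differ on other setups):
 *
 *	struct ebt_replace *repl = buf;       // header, filled by userspace
 *	repl->entries = buf + sizeof(*repl);  // rules follow the header
 *	setsockopt(sock, IPPROTO_IP, EBT_SO_SET_ENTRIES, buf,
 *		   sizeof(*repl) + repl->entries_size);
 *
 * do_replace() insists that len == sizeof(struct ebt_replace) +
 * entries_size, that entries_size is nonzero, and that the entry and
 * counter counts cannot overflow the later vmalloc() size calculations.
 */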
1152 
1153 struct ebt_table *
1154 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1155 {
1156 	struct ebt_table_info *newinfo;
1157 	struct ebt_table *t, *table;
1158 	struct ebt_replace_kernel *repl;
1159 	int ret, i, countersize;
1160 	void *p;
1161 
1162 	if (input_table == NULL || (repl = input_table->table) == NULL ||
1163 	    repl->entries == NULL || repl->entries_size == 0 ||
1164 	    repl->counters != NULL || input_table->private != NULL) {
1165 		BUGPRINT("Bad table data for ebt_register_table!!!\n");
1166 		return ERR_PTR(-EINVAL);
1167 	}
1168 
1169 	/* Don't add one table to multiple lists. */
1170 	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1171 	if (!table) {
1172 		ret = -ENOMEM;
1173 		goto out;
1174 	}
1175 
1176 	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1177 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1178 	ret = -ENOMEM;
1179 	if (!newinfo)
1180 		goto free_table;
1181 
1182 	p = vmalloc(repl->entries_size);
1183 	if (!p)
1184 		goto free_newinfo;
1185 
1186 	memcpy(p, repl->entries, repl->entries_size);
1187 	newinfo->entries = p;
1188 
1189 	newinfo->entries_size = repl->entries_size;
1190 	newinfo->nentries = repl->nentries;
1191 
1192 	if (countersize)
1193 		memset(newinfo->counters, 0, countersize);
1194 
1195 	/* fill in newinfo and parse the entries */
1196 	newinfo->chainstack = NULL;
1197 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1198 		if ((repl->valid_hooks & (1 << i)) == 0)
1199 			newinfo->hook_entry[i] = NULL;
1200 		else
1201 			newinfo->hook_entry[i] = p +
1202 				((char *)repl->hook_entry[i] - repl->entries);
1203 	}
1204 	ret = translate_table(net, repl->name, newinfo);
1205 	if (ret != 0) {
1206 		BUGPRINT("Translate_table failed\n");
1207 		goto free_chainstack;
1208 	}
1209 
1210 	if (table->check && table->check(newinfo, table->valid_hooks)) {
1211 		BUGPRINT("The table doesn't like its own initial data, lol\n");
1212 		ret = -EINVAL;
1213 		goto free_chainstack;
1214 	}
1215 
1216 	table->private = newinfo;
1217 	rwlock_init(&table->lock);
1218 	mutex_lock(&ebt_mutex);
1219 	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1220 		if (strcmp(t->name, table->name) == 0) {
1221 			ret = -EEXIST;
1222 			BUGPRINT("Table name already exists\n");
1223 			goto free_unlock;
1224 		}
1225 	}
1226 
1227 	/* Hold a reference count if the chains aren't empty */
1228 	if (newinfo->nentries && !try_module_get(table->me)) {
1229 		ret = -ENOENT;
1230 		goto free_unlock;
1231 	}
1232 	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1233 	mutex_unlock(&ebt_mutex);
1234 	return table;
1235 free_unlock:
1236 	mutex_unlock(&ebt_mutex);
1237 free_chainstack:
1238 	if (newinfo->chainstack) {
1239 		for_each_possible_cpu(i)
1240 			vfree(newinfo->chainstack[i]);
1241 		vfree(newinfo->chainstack);
1242 	}
1243 	vfree(newinfo->entries);
1244 free_newinfo:
1245 	vfree(newinfo);
1246 free_table:
1247 	kfree(table);
1248 out:
1249 	return ERR_PTR(ret);
1250 }
1251 
1252 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1253 {
1254 	int i;
1255 
1256 	if (!table) {
1257 		BUGPRINT("Request to unregister NULL table!!!\n");
1258 		return;
1259 	}
1260 	mutex_lock(&ebt_mutex);
1261 	list_del(&table->list);
1262 	mutex_unlock(&ebt_mutex);
1263 	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1264 			  ebt_cleanup_entry, net, NULL);
1265 	if (table->private->nentries)
1266 		module_put(table->me);
1267 	vfree(table->private->entries);
1268 	if (table->private->chainstack) {
1269 		for_each_possible_cpu(i)
1270 			vfree(table->private->chainstack[i]);
1271 		vfree(table->private->chainstack);
1272 	}
1273 	vfree(table->private);
1274 	kfree(table);
1275 }
1276 
1277 /* userspace just supplied us with counters */
1278 static int do_update_counters(struct net *net, const char *name,
1279 				struct ebt_counter __user *counters,
1280 				unsigned int num_counters,
1281 				const void __user *user, unsigned int len)
1282 {
1283 	int i, ret;
1284 	struct ebt_counter *tmp;
1285 	struct ebt_table *t;
1286 
1287 	if (num_counters == 0)
1288 		return -EINVAL;
1289 
1290 	tmp = vmalloc(num_counters * sizeof(*tmp));
1291 	if (!tmp)
1292 		return -ENOMEM;
1293 
1294 	t = find_table_lock(net, name, &ret, &ebt_mutex);
1295 	if (!t)
1296 		goto free_tmp;
1297 
1298 	if (num_counters != t->private->nentries) {
1299 		BUGPRINT("Wrong nr of counters\n");
1300 		ret = -EINVAL;
1301 		goto unlock_mutex;
1302 	}
1303 
1304 	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1305 		ret = -EFAULT;
1306 		goto unlock_mutex;
1307 	}
1308 
1309 	/* we want an atomic add of the counters */
1310 	write_lock_bh(&t->lock);
1311 
1312 	/* we add to the counters of the first cpu */
1313 	for (i = 0; i < num_counters; i++) {
1314 		t->private->counters[i].pcnt += tmp[i].pcnt;
1315 		t->private->counters[i].bcnt += tmp[i].bcnt;
1316 	}
1317 
1318 	write_unlock_bh(&t->lock);
1319 	ret = 0;
1320 unlock_mutex:
1321 	mutex_unlock(&ebt_mutex);
1322 free_tmp:
1323 	vfree(tmp);
1324 	return ret;
1325 }
1326 
1327 static int update_counters(struct net *net, const void __user *user,
1328 			    unsigned int len)
1329 {
1330 	struct ebt_replace hlp;
1331 
1332 	if (copy_from_user(&hlp, user, sizeof(hlp)))
1333 		return -EFAULT;
1334 
1335 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1336 		return -EINVAL;
1337 
1338 	return do_update_counters(net, hlp.name, hlp.counters,
1339 				hlp.num_counters, user, len);
1340 }
1341 
1342 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1343     const char *base, char __user *ubase)
1344 {
1345 	char __user *hlp = ubase + ((char *)m - base);
1346 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1347 
1348 	/* ebtables expects 32-byte names but xt_match names are only 29 bytes
1349 	   long. Copy what fits and zero-fill the remaining bytes. */
1350 	strlcpy(name, m->u.match->name, sizeof(name));
1351 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1352 		return -EFAULT;
1353 	return 0;
1354 }
1355 
1356 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1357     const char *base, char __user *ubase)
1358 {
1359 	char __user *hlp = ubase + ((char *)w - base);
1360 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1361 
1362 	strlcpy(name, w->u.watcher->name, sizeof(name));
1363 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1364 		return -EFAULT;
1365 	return 0;
1366 }
1367 
1368 static inline int
1369 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1370 {
1371 	int ret;
1372 	char __user *hlp;
1373 	const struct ebt_entry_target *t;
1374 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1375 
1376 	if (e->bitmask == 0)
1377 		return 0;
1378 
1379 	hlp = ubase + (((char *)e + e->target_offset) - base);
1380 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1381 
1382 	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1383 	if (ret != 0)
1384 		return ret;
1385 	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1386 	if (ret != 0)
1387 		return ret;
1388 	strlcpy(name, t->u.target->name, sizeof(name));
1389 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1390 		return -EFAULT;
1391 	return 0;
1392 }
1393 
1394 static int copy_counters_to_user(struct ebt_table *t,
1395 				  const struct ebt_counter *oldcounters,
1396 				  void __user *user, unsigned int num_counters,
1397 				  unsigned int nentries)
1398 {
1399 	struct ebt_counter *counterstmp;
1400 	int ret = 0;
1401 
1402 	/* userspace might not need the counters */
1403 	if (num_counters == 0)
1404 		return 0;
1405 
1406 	if (num_counters != nentries) {
1407 		BUGPRINT("Num_counters wrong\n");
1408 		return -EINVAL;
1409 	}
1410 
1411 	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1412 	if (!counterstmp)
1413 		return -ENOMEM;
1414 
1415 	write_lock_bh(&t->lock);
1416 	get_counters(oldcounters, counterstmp, nentries);
1417 	write_unlock_bh(&t->lock);
1418 
1419 	if (copy_to_user(user, counterstmp,
1420 	   nentries * sizeof(struct ebt_counter)))
1421 		ret = -EFAULT;
1422 	vfree(counterstmp);
1423 	return ret;
1424 }
1425 
1426 /* called with ebt_mutex locked */
1427 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1428     const int *len, int cmd)
1429 {
1430 	struct ebt_replace tmp;
1431 	const struct ebt_counter *oldcounters;
1432 	unsigned int entries_size, nentries;
1433 	int ret;
1434 	char *entries;
1435 
1436 	if (cmd == EBT_SO_GET_ENTRIES) {
1437 		entries_size = t->private->entries_size;
1438 		nentries = t->private->nentries;
1439 		entries = t->private->entries;
1440 		oldcounters = t->private->counters;
1441 	} else {
1442 		entries_size = t->table->entries_size;
1443 		nentries = t->table->nentries;
1444 		entries = t->table->entries;
1445 		oldcounters = t->table->counters;
1446 	}
1447 
1448 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1449 		return -EFAULT;
1450 
1451 	if (*len != sizeof(struct ebt_replace) + entries_size +
1452 	   (tmp.num_counters ? nentries * sizeof(struct ebt_counter) : 0))
1453 		return -EINVAL;
1454 
1455 	if (tmp.nentries != nentries) {
1456 		BUGPRINT("Nentries wrong\n");
1457 		return -EINVAL;
1458 	}
1459 
1460 	if (tmp.entries_size != entries_size) {
1461 		BUGPRINT("Wrong size\n");
1462 		return -EINVAL;
1463 	}
1464 
1465 	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1466 					tmp.num_counters, nentries);
1467 	if (ret)
1468 		return ret;
1469 
1470 	if (copy_to_user(tmp.entries, entries, entries_size)) {
1471 		BUGPRINT("Couldn't copy entries to userspace\n");
1472 		return -EFAULT;
1473 	}
1474 	/* set the match/watcher/target names right */
1475 	return EBT_ENTRY_ITERATE(entries, entries_size,
1476 	   ebt_make_names, entries, tmp.entries);
1477 }
1478 
1479 static int do_ebt_set_ctl(struct sock *sk,
1480 	int cmd, void __user *user, unsigned int len)
1481 {
1482 	int ret;
1483 	struct net *net = sock_net(sk);
1484 
1485 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1486 		return -EPERM;
1487 
1488 	switch (cmd) {
1489 	case EBT_SO_SET_ENTRIES:
1490 		ret = do_replace(net, user, len);
1491 		break;
1492 	case EBT_SO_SET_COUNTERS:
1493 		ret = update_counters(net, user, len);
1494 		break;
1495 	default:
1496 		ret = -EINVAL;
1497 	}
1498 	return ret;
1499 }
1500 
1501 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1502 {
1503 	int ret;
1504 	struct ebt_replace tmp;
1505 	struct ebt_table *t;
1506 	struct net *net = sock_net(sk);
1507 
1508 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1509 		return -EPERM;
1510 
1511 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1512 		return -EFAULT;
1513 
1514 	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
1515 	if (!t)
1516 		return ret;
1517 
1518 	switch (cmd) {
1519 	case EBT_SO_GET_INFO:
1520 	case EBT_SO_GET_INIT_INFO:
1521 		if (*len != sizeof(struct ebt_replace)) {
1522 			ret = -EINVAL;
1523 			mutex_unlock(&ebt_mutex);
1524 			break;
1525 		}
1526 		if (cmd == EBT_SO_GET_INFO) {
1527 			tmp.nentries = t->private->nentries;
1528 			tmp.entries_size = t->private->entries_size;
1529 			tmp.valid_hooks = t->valid_hooks;
1530 		} else {
1531 			tmp.nentries = t->table->nentries;
1532 			tmp.entries_size = t->table->entries_size;
1533 			tmp.valid_hooks = t->table->valid_hooks;
1534 		}
1535 		mutex_unlock(&ebt_mutex);
1536 		if (copy_to_user(user, &tmp, *len) != 0) {
1537 			BUGPRINT("c2u Didn't work\n");
1538 			ret = -EFAULT;
1539 			break;
1540 		}
1541 		ret = 0;
1542 		break;
1543 
1544 	case EBT_SO_GET_ENTRIES:
1545 	case EBT_SO_GET_INIT_ENTRIES:
1546 		ret = copy_everything_to_user(t, user, len, cmd);
1547 		mutex_unlock(&ebt_mutex);
1548 		break;
1549 
1550 	default:
1551 		mutex_unlock(&ebt_mutex);
1552 		ret = -EINVAL;
1553 	}
1554 
1555 	return ret;
1556 }
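/*
 * Typical get-side sequence, as implied by the length checks above:
 * userspace first calls EBT_SO_GET_INFO with a struct ebt_replace to
 * learn nentries and entries_size, then EBT_SO_GET_ENTRIES with *len
 * equal to sizeof(struct ebt_replace) + entries_size (plus nentries
 * counters when num_counters is set); copy_everything_to_user() then
 * writes the rules to tmp.entries and the counters to tmp.counters.
 */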
1557 
1558 #ifdef CONFIG_COMPAT
1559 /* 32 bit-userspace compatibility definitions. */
1560 struct compat_ebt_replace {
1561 	char name[EBT_TABLE_MAXNAMELEN];
1562 	compat_uint_t valid_hooks;
1563 	compat_uint_t nentries;
1564 	compat_uint_t entries_size;
1565 	/* start of the chains */
1566 	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1567 	/* nr of counters userspace expects back */
1568 	compat_uint_t num_counters;
1569 	/* where the kernel will put the old counters. */
1570 	compat_uptr_t counters;
1571 	compat_uptr_t entries;
1572 };
1573 
1574 /* struct ebt_entry_match, _target and _watcher have same layout */
1575 struct compat_ebt_entry_mwt {
1576 	union {
1577 		char name[EBT_FUNCTION_MAXNAMELEN];
1578 		compat_uptr_t ptr;
1579 	} u;
1580 	compat_uint_t match_size;
1581 	compat_uint_t data[0];
1582 };
1583 
1584 /* account for possible padding between match_size and ->data */
1585 static int ebt_compat_entry_padsize(void)
1586 {
1587 	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1588 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1589 	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1590 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1591 }
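/*
 * Why padding can exist at all: the native struct ebt_entry_match
 * embeds a kernel pointer in its union (forcing pointer alignment),
 * while the compat variant uses a 4-byte compat_uptr_t, so the
 * XT_ALIGN()ed native header can be a few bytes larger than the
 * COMPAT_XT_ALIGN()ed compat one.  ebt_compat_entry_padsize() is that
 * difference, and it is accounted for in both directions of the
 * 32-bit conversion (the exact value is arch-dependent).
 */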
1592 
1593 static int ebt_compat_match_offset(const struct xt_match *match,
1594 				   unsigned int userlen)
1595 {
1596 	/*
1597 	 * ebt_among needs special handling. The kernel .matchsize is
1598 	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1599 	 * value is expected.
1600 	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1601 	 */
1602 	if (unlikely(match->matchsize == -1))
1603 		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1604 	return xt_compat_match_offset(match);
1605 }
1606 
1607 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1608 				unsigned int *size)
1609 {
1610 	const struct xt_match *match = m->u.match;
1611 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1612 	int off = ebt_compat_match_offset(match, m->match_size);
1613 	compat_uint_t msize = m->match_size - off;
1614 
1615 	BUG_ON(off >= m->match_size);
1616 
1617 	if (copy_to_user(cm->u.name, match->name,
1618 	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1619 		return -EFAULT;
1620 
1621 	if (match->compat_to_user) {
1622 		if (match->compat_to_user(cm->data, m->data))
1623 			return -EFAULT;
1624 	} else if (copy_to_user(cm->data, m->data, msize))
1625 			return -EFAULT;
1626 
1627 	*size -= ebt_compat_entry_padsize() + off;
1628 	*dstptr = cm->data;
1629 	*dstptr += msize;
1630 	return 0;
1631 }
1632 
1633 static int compat_target_to_user(struct ebt_entry_target *t,
1634 				 void __user **dstptr,
1635 				 unsigned int *size)
1636 {
1637 	const struct xt_target *target = t->u.target;
1638 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1639 	int off = xt_compat_target_offset(target);
1640 	compat_uint_t tsize = t->target_size - off;
1641 
1642 	BUG_ON(off >= t->target_size);
1643 
1644 	if (copy_to_user(cm->u.name, target->name,
1645 	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1646 		return -EFAULT;
1647 
1648 	if (target->compat_to_user) {
1649 		if (target->compat_to_user(cm->data, t->data))
1650 			return -EFAULT;
1651 	} else if (copy_to_user(cm->data, t->data, tsize))
1652 		return -EFAULT;
1653 
1654 	*size -= ebt_compat_entry_padsize() + off;
1655 	*dstptr = cm->data;
1656 	*dstptr += tsize;
1657 	return 0;
1658 }
1659 
1660 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1661 				  void __user **dstptr,
1662 				  unsigned int *size)
1663 {
1664 	return compat_target_to_user((struct ebt_entry_target *)w,
1665 							dstptr, size);
1666 }
1667 
1668 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1669 				unsigned int *size)
1670 {
1671 	struct ebt_entry_target *t;
1672 	struct ebt_entry __user *ce;
1673 	u32 watchers_offset, target_offset, next_offset;
1674 	compat_uint_t origsize;
1675 	int ret;
1676 
1677 	if (e->bitmask == 0) {
1678 		if (*size < sizeof(struct ebt_entries))
1679 			return -EINVAL;
1680 		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1681 			return -EFAULT;
1682 
1683 		*dstptr += sizeof(struct ebt_entries);
1684 		*size -= sizeof(struct ebt_entries);
1685 		return 0;
1686 	}
1687 
1688 	if (*size < sizeof(*ce))
1689 		return -EINVAL;
1690 
1691 	ce = (struct ebt_entry __user *)*dstptr;
1692 	if (copy_to_user(ce, e, sizeof(*ce)))
1693 		return -EFAULT;
1694 
1695 	origsize = *size;
1696 	*dstptr += sizeof(*ce);
1697 
1698 	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1699 	if (ret)
1700 		return ret;
1701 	watchers_offset = e->watchers_offset - (origsize - *size);
1702 
1703 	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1704 	if (ret)
1705 		return ret;
1706 	target_offset = e->target_offset - (origsize - *size);
1707 
1708 	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1709 
1710 	ret = compat_target_to_user(t, dstptr, size);
1711 	if (ret)
1712 		return ret;
1713 	next_offset = e->next_offset - (origsize - *size);
1714 
1715 	if (put_user(watchers_offset, &ce->watchers_offset) ||
1716 	    put_user(target_offset, &ce->target_offset) ||
1717 	    put_user(next_offset, &ce->next_offset))
1718 		return -EFAULT;
1719 
1720 	*size -= sizeof(*ce);
1721 	return 0;
1722 }
1723 
1724 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1725 {
1726 	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1727 	*off += ebt_compat_entry_padsize();
1728 	return 0;
1729 }
1730 
1731 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1732 {
1733 	*off += xt_compat_target_offset(w->u.watcher);
1734 	*off += ebt_compat_entry_padsize();
1735 	return 0;
1736 }
1737 
1738 static int compat_calc_entry(const struct ebt_entry *e,
1739 			     const struct ebt_table_info *info,
1740 			     const void *base,
1741 			     struct compat_ebt_replace *newinfo)
1742 {
1743 	const struct ebt_entry_target *t;
1744 	unsigned int entry_offset;
1745 	int off, ret, i;
1746 
1747 	if (e->bitmask == 0)
1748 		return 0;
1749 
1750 	off = 0;
1751 	entry_offset = (void *)e - base;
1752 
1753 	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1754 	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1755 
1756 	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1757 
1758 	off += xt_compat_target_offset(t->u.target);
1759 	off += ebt_compat_entry_padsize();
1760 
1761 	newinfo->entries_size -= off;
1762 
1763 	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1764 	if (ret)
1765 		return ret;
1766 
1767 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1768 		const void *hookptr = info->hook_entry[i];
1769 		if (info->hook_entry[i] &&
1770 		    (e < (struct ebt_entry *)(base - hookptr))) {
1771 			newinfo->hook_entry[i] -= off;
1772 			pr_debug("0x%08X -> 0x%08X\n",
1773 					newinfo->hook_entry[i] + off,
1774 					newinfo->hook_entry[i]);
1775 		}
1776 	}
1777 
1778 	return 0;
1779 }
1780 
1781 
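/*
 * Build the compat view of a whole table: entries_size is reduced by the
 * per-entry shrinkage computed in compat_calc_entry() and the compat
 * offset table is initialized for the later jump/offset fixups.
 */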
1782 static int compat_table_info(const struct ebt_table_info *info,
1783 			     struct compat_ebt_replace *newinfo)
1784 {
1785 	unsigned int size = info->entries_size;
1786 	const void *entries = info->entries;
1787 
1788 	newinfo->entries_size = size;
1789 
1790 	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1791 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1792 							entries, newinfo);
1793 }
1794 
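/*
 * Compat counterpart of copy_everything_to_user(): hand the live table
 * (EBT_SO_GET_ENTRIES) or its initial template, plus counters and the
 * translated entries, back to a 32-bit userspace.
 */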
1795 static int compat_copy_everything_to_user(struct ebt_table *t,
1796 					  void __user *user, int *len, int cmd)
1797 {
1798 	struct compat_ebt_replace repl, tmp;
1799 	struct ebt_counter *oldcounters;
1800 	struct ebt_table_info tinfo;
1801 	int ret;
1802 	void __user *pos;
1803 
1804 	memset(&tinfo, 0, sizeof(tinfo));
1805 
1806 	if (cmd == EBT_SO_GET_ENTRIES) {
1807 		tinfo.entries_size = t->private->entries_size;
1808 		tinfo.nentries = t->private->nentries;
1809 		tinfo.entries = t->private->entries;
1810 		oldcounters = t->private->counters;
1811 	} else {
1812 		tinfo.entries_size = t->table->entries_size;
1813 		tinfo.nentries = t->table->nentries;
1814 		tinfo.entries = t->table->entries;
1815 		oldcounters = t->table->counters;
1816 	}
1817 
1818 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1819 		return -EFAULT;
1820 
1821 	if (tmp.nentries != tinfo.nentries ||
1822 	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1823 		return -EINVAL;
1824 
1825 	memcpy(&repl, &tmp, sizeof(repl));
1826 	if (cmd == EBT_SO_GET_ENTRIES)
1827 		ret = compat_table_info(t->private, &repl);
1828 	else
1829 		ret = compat_table_info(&tinfo, &repl);
1830 	if (ret)
1831 		return ret;
1832 
1833 	if (*len != sizeof(tmp) + repl.entries_size +
1834 	   (tmp.num_counters ? tinfo.nentries * sizeof(struct ebt_counter) : 0)) {
1835 		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1836 				*len, tinfo.entries_size, repl.entries_size);
1837 		return -EINVAL;
1838 	}
1839 
1840 	/* userspace might not need the counters */
1841 	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1842 					tmp.num_counters, tinfo.nentries);
1843 	if (ret)
1844 		return ret;
1845 
1846 	pos = compat_ptr(tmp.entries);
1847 	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1848 			compat_copy_entry_to_user, &pos, &tmp.entries_size);
1849 }
1850 
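/*
 * State for translating a 32-bit entries blob into the kernel layout.
 * The translation is run twice: a first pass with buf_kern_start == NULL
 * only measures how large the kernel-side buffer must be, the second pass
 * copies into that buffer.  Roughly, each match/watcher/target is turned
 * from
 *	compat_ebt_entry_mwt header | payload
 * into
 *	ebt_entry_match/_watcher/_target header | XT_ALIGN'ed payload
 */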
1851 struct ebt_entries_buf_state {
1852 	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
1853 	u32 buf_kern_len;	/* total size of kernel buffer */
1854 	u32 buf_kern_offset;	/* amount of data copied so far */
1855 	u32 buf_user_offset;	/* read position in userspace buffer */
1856 };
1857 
1858 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1859 {
1860 	state->buf_kern_offset += sz;
1861 	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1862 }
1863 
1864 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1865 		       void *data, unsigned int sz)
1866 {
1867 	if (state->buf_kern_start == NULL)
1868 		goto count_only;
1869 
1870 	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1871 
1872 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1873 
1874  count_only:
1875 	state->buf_user_offset += sz;
1876 	return ebt_buf_count(state, sz);
1877 }
1878 
1879 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1880 {
1881 	char *b = state->buf_kern_start;
1882 
1883 	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1884 
1885 	if (b != NULL && sz > 0)
1886 		memset(b + state->buf_kern_offset, 0, sz);
1887 	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1888 	return ebt_buf_count(state, sz);
1889 }
1890 
1891 enum compat_mwt {
1892 	EBT_COMPAT_MATCH,
1893 	EBT_COMPAT_WATCHER,
1894 	EBT_COMPAT_TARGET,
1895 };
1896 
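/*
 * Translate one compat match, watcher or target into the kernel
 * representation at the current write position (when dst is set).
 * Returns the size the payload takes up in the kernel layout, i.e. the
 * compat payload size plus the 32/64-bit offset delta; the caller stores
 * that value as the kernel match_size.
 */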
1897 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1898 				enum compat_mwt compat_mwt,
1899 				struct ebt_entries_buf_state *state,
1900 				const unsigned char *base)
1901 {
1902 	char name[EBT_FUNCTION_MAXNAMELEN];
1903 	struct xt_match *match;
1904 	struct xt_target *wt;
1905 	void *dst = NULL;
1906 	int off, pad = 0;
1907 	unsigned int size_kern, match_size = mwt->match_size;
1908 
1909 	strlcpy(name, mwt->u.name, sizeof(name));
1910 
1911 	if (state->buf_kern_start)
1912 		dst = state->buf_kern_start + state->buf_kern_offset;
1913 
1914 	switch (compat_mwt) {
1915 	case EBT_COMPAT_MATCH:
1916 		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
1917 		if (IS_ERR(match))
1918 			return PTR_ERR(match);
1919 
1920 		off = ebt_compat_match_offset(match, match_size);
1921 		if (dst) {
1922 			if (match->compat_from_user)
1923 				match->compat_from_user(dst, mwt->data);
1924 			else
1925 				memcpy(dst, mwt->data, match_size);
1926 		}
1927 
1928 		size_kern = match->matchsize;
1929 		if (unlikely(size_kern == -1))
1930 			size_kern = match_size;
1931 		module_put(match->me);
1932 		break;
1933 	case EBT_COMPAT_WATCHER: /* fallthrough */
1934 	case EBT_COMPAT_TARGET:
1935 		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
1936 		if (IS_ERR(wt))
1937 			return PTR_ERR(wt);
1938 		off = xt_compat_target_offset(wt);
1939 
1940 		if (dst) {
1941 			if (wt->compat_from_user)
1942 				wt->compat_from_user(dst, mwt->data);
1943 			else
1944 				memcpy(dst, mwt->data, match_size);
1945 		}
1946 
1947 		size_kern = wt->targetsize;
1948 		module_put(wt->me);
1949 		break;
1950 
1951 	default:
1952 		return -EINVAL;
1953 	}
1954 
1955 	state->buf_kern_offset += match_size + off;
1956 	state->buf_user_offset += match_size;
1957 	pad = XT_ALIGN(size_kern) - size_kern;
1958 
1959 	if (pad > 0 && dst) {
1960 		BUG_ON(state->buf_kern_len <= pad);
1961 		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1962 		memset(dst + size_kern, 0, pad);
1963 	}
1964 	return off + match_size;
1965 }
1966 
1967 /*
1968  * return size of all matches, watchers or target, including necessary
1969  * alignment and padding.
1970  */
1971 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1972 			unsigned int size_left, enum compat_mwt type,
1973 			struct ebt_entries_buf_state *state, const void *base)
1974 {
1975 	int growth = 0;
1976 	char *buf;
1977 
1978 	if (size_left == 0)
1979 		return 0;
1980 
1981 	buf = (char *) match32;
1982 
1983 	while (size_left >= sizeof(*match32)) {
1984 		struct ebt_entry_match *match_kern;
1985 		int ret;
1986 
1987 		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1988 		if (match_kern) {
1989 			char *tmp;
1990 			tmp = state->buf_kern_start + state->buf_kern_offset;
1991 			match_kern = (struct ebt_entry_match *) tmp;
1992 		}
1993 		ret = ebt_buf_add(state, buf, sizeof(*match32));
1994 		if (ret < 0)
1995 			return ret;
1996 		size_left -= sizeof(*match32);
1997 
1998 		/* add padding before match->data (if any) */
1999 		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
2000 		if (ret < 0)
2001 			return ret;
2002 
2003 		if (match32->match_size > size_left)
2004 			return -EINVAL;
2005 
2006 		size_left -= match32->match_size;
2007 
2008 		ret = compat_mtw_from_user(match32, type, state, base);
2009 		if (ret < 0)
2010 			return ret;
2011 
2012 		BUG_ON(ret < match32->match_size);
2013 		growth += ret - match32->match_size;
2014 		growth += ebt_compat_entry_padsize();
2015 
2016 		buf += sizeof(*match32);
2017 		buf += match32->match_size;
2018 
2019 		if (match_kern)
2020 			match_kern->match_size = ret;
2021 
2022 		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2023 		match32 = (struct compat_ebt_entry_mwt *) buf;
2024 	}
2025 
2026 	return growth;
2027 }
2028 
2029 /* called for all ebt_entry structures. */
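/*
 * On the counting pass it also records, via xt_compat_add_offset(), by how
 * much this entry grows when translated, so that hook and jump offsets can
 * be adjusted later on.
 */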
2030 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2031 			  unsigned int *total,
2032 			  struct ebt_entries_buf_state *state)
2033 {
2034 	unsigned int i, j, startoff, new_offset = 0;
2035 	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2036 	unsigned int offsets[4];
2037 	unsigned int *offsets_update = NULL;
2038 	int ret;
2039 	char *buf_start;
2040 
2041 	if (*total < sizeof(struct ebt_entries))
2042 		return -EINVAL;
2043 
2044 	if (!entry->bitmask) {
2045 		*total -= sizeof(struct ebt_entries);
2046 		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2047 	}
2048 	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2049 		return -EINVAL;
2050 
2051 	startoff = state->buf_user_offset;
2052 	/* pull in most of the ebt_entry, it does not need to be changed. */
2053 	ret = ebt_buf_add(state, entry,
2054 			offsetof(struct ebt_entry, watchers_offset));
2055 	if (ret < 0)
2056 		return ret;
2057 
2058 	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2059 	memcpy(&offsets[1], &entry->watchers_offset,
2060 			sizeof(offsets) - sizeof(offsets[0]));
2061 
2062 	if (state->buf_kern_start) {
2063 		buf_start = state->buf_kern_start + state->buf_kern_offset;
2064 		offsets_update = (unsigned int *) buf_start;
2065 	}
2066 	ret = ebt_buf_add(state, &offsets[1],
2067 			sizeof(offsets) - sizeof(offsets[0]));
2068 	if (ret < 0)
2069 		return ret;
2070 	buf_start = (char *) entry;
2071 	/*
2072 	 * 0: matches offset, always follows ebt_entry.
2073 	 * 1: watchers offset, from ebt_entry structure
2074 	 * 2: target offset, from ebt_entry structure
2075 	 * 3: next ebt_entry offset, from ebt_entry structure
2076 	 *
2077 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2078 	 */
2079 	for (i = 0, j = 1; j < 4; j++, i++) {
2080 		struct compat_ebt_entry_mwt *match32;
2081 		unsigned int size;
2082 		char *buf = buf_start;
2083 
2084 		buf = buf_start + offsets[i];
2085 		if (offsets[i] > offsets[j])
2086 			return -EINVAL;
2087 
2088 		match32 = (struct compat_ebt_entry_mwt *) buf;
2089 		size = offsets[j] - offsets[i];
2090 		ret = ebt_size_mwt(match32, size, i, state, base);
2091 		if (ret < 0)
2092 			return ret;
2093 		new_offset += ret;
2094 		if (offsets_update && new_offset) {
2095 			pr_debug("change offset %d to %d\n",
2096 				offsets_update[i], offsets[j] + new_offset);
2097 			offsets_update[i] = offsets[j] + new_offset;
2098 		}
2099 	}
2100 
2101 	if (state->buf_kern_start == NULL) {
2102 		unsigned int offset = buf_start - (char *) base;
2103 
2104 		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2105 		if (ret < 0)
2106 			return ret;
2107 	}
2108 
2109 	startoff = state->buf_user_offset - startoff;
2110 
2111 	BUG_ON(*total < startoff);
2112 	*total -= startoff;
2113 	return 0;
2114 }
2115 
2116 /*
2117  * repl->entries_size is the size of the ebt_entry blob in userspace.
2118  * It might need more memory when copied to a 64 bit kernel in case
2119  * userspace is 32-bit. So, first task: find out how much memory is needed.
2120  *
2121  * Called before validation is performed.
2122  */
2123 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2124 				struct ebt_entries_buf_state *state)
2125 {
2126 	unsigned int size_remaining = size_user;
2127 	int ret;
2128 
2129 	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2130 					&size_remaining, state);
2131 	if (ret < 0)
2132 		return ret;
2133 
2134 	WARN_ON(size_remaining);
2135 	return state->buf_kern_offset;
2136 }
2137 
2138 
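/*
 * Pull in a 32-bit ebt_replace header and widen it.  The leading members
 * share their layout; everything from hook_entry onwards holds pointers
 * and has to be converted with compat_ptr().  Basic sanity checks on the
 * announced sizes are done here as well.
 */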
2139 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2140 					    void __user *user, unsigned int len)
2141 {
2142 	struct compat_ebt_replace tmp;
2143 	int i;
2144 
2145 	if (len < sizeof(tmp))
2146 		return -EINVAL;
2147 
2148 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2149 		return -EFAULT;
2150 
2151 	if (len != sizeof(tmp) + tmp.entries_size)
2152 		return -EINVAL;
2153 
2154 	if (tmp.entries_size == 0)
2155 		return -EINVAL;
2156 
2157 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2158 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2159 		return -ENOMEM;
2160 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2161 		return -ENOMEM;
2162 
2163 	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2164 
2165 	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2166 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2167 		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2168 
2169 	repl->num_counters = tmp.num_counters;
2170 	repl->counters = compat_ptr(tmp.counters);
2171 	repl->entries = compat_ptr(tmp.entries);
2172 	return 0;
2173 }
2174 
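/*
 * Compat version of do_replace(): translate the 32-bit blob in two passes
 * (measure, then copy into a freshly allocated kernel-sized buffer),
 * rewrite the hook entry pointers using the recorded jump deltas and hand
 * the result to the common do_replace_finish().
 */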
2175 static int compat_do_replace(struct net *net, void __user *user,
2176 			     unsigned int len)
2177 {
2178 	int ret, i, countersize, size64;
2179 	struct ebt_table_info *newinfo;
2180 	struct ebt_replace tmp;
2181 	struct ebt_entries_buf_state state;
2182 	void *entries_tmp;
2183 
2184 	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2185 	if (ret) {
2186 		/* try real handler in case userland supplied needed padding */
2187 		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2188 			ret = 0;
2189 		return ret;
2190 	}
2191 
2192 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2193 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2194 	if (!newinfo)
2195 		return -ENOMEM;
2196 
2197 	if (countersize)
2198 		memset(newinfo->counters, 0, countersize);
2199 
2200 	memset(&state, 0, sizeof(state));
2201 
2202 	newinfo->entries = vmalloc(tmp.entries_size);
2203 	if (!newinfo->entries) {
2204 		ret = -ENOMEM;
2205 		goto free_newinfo;
2206 	}
2207 	if (copy_from_user(
2208 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2209 		ret = -EFAULT;
2210 		goto free_entries;
2211 	}
2212 
2213 	entries_tmp = newinfo->entries;
2214 
2215 	xt_compat_lock(NFPROTO_BRIDGE);
2216 
2217 	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2218 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2219 	if (ret < 0)
2220 		goto out_unlock;
2221 
2222 	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2223 		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2224 		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2225 
2226 	size64 = ret;
2227 	newinfo->entries = vmalloc(size64);
2228 	if (!newinfo->entries) {
2229 		vfree(entries_tmp);
2230 		ret = -ENOMEM;
2231 		goto out_unlock;
2232 	}
2233 
2234 	memset(&state, 0, sizeof(state));
2235 	state.buf_kern_start = newinfo->entries;
2236 	state.buf_kern_len = size64;
2237 
2238 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2239 	BUG_ON(ret < 0);	/* parses same data again */
2240 
2241 	vfree(entries_tmp);
2242 	tmp.entries_size = size64;
2243 
2244 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2245 		char __user *usrptr;
2246 		if (tmp.hook_entry[i]) {
2247 			unsigned int delta;
2248 			usrptr = (char __user *) tmp.hook_entry[i];
2249 			delta = usrptr - tmp.entries;
2250 			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2251 			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2252 		}
2253 	}
2254 
2255 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2256 	xt_compat_unlock(NFPROTO_BRIDGE);
2257 
2258 	ret = do_replace_finish(net, &tmp, newinfo);
2259 	if (ret == 0)
2260 		return ret;
2261 free_entries:
2262 	vfree(newinfo->entries);
2263 free_newinfo:
2264 	vfree(newinfo);
2265 	return ret;
2266 out_unlock:
2267 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2268 	xt_compat_unlock(NFPROTO_BRIDGE);
2269 	goto free_entries;
2270 }
2271 
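/*
 * EBT_SO_SET_COUNTERS from a 32-bit process: only the ebt_replace header
 * differs, the counter array itself has the same layout on both ABIs, so
 * once the header is converted the regular do_update_counters() path is
 * used.
 */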
2272 static int compat_update_counters(struct net *net, void __user *user,
2273 				  unsigned int len)
2274 {
2275 	struct compat_ebt_replace hlp;
2276 
2277 	if (copy_from_user(&hlp, user, sizeof(hlp)))
2278 		return -EFAULT;
2279 
2280 	/* try real handler in case userland supplied needed padding */
2281 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2282 		return update_counters(net, user, len);
2283 
2284 	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2285 					hlp.num_counters, user, len);
2286 }
2287 
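/*
 * Compat setsockopt entry point: same commands as do_ebt_set_ctl(), but
 * routed through the compat-aware replace/counter helpers above.
 */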
2288 static int compat_do_ebt_set_ctl(struct sock *sk,
2289 		int cmd, void __user *user, unsigned int len)
2290 {
2291 	int ret;
2292 	struct net *net = sock_net(sk);
2293 
2294 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2295 		return -EPERM;
2296 
2297 	switch (cmd) {
2298 	case EBT_SO_SET_ENTRIES:
2299 		ret = compat_do_replace(net, user, len);
2300 		break;
2301 	case EBT_SO_SET_COUNTERS:
2302 		ret = compat_update_counters(net, user, len);
2303 		break;
2304 	default:
2305 		ret = -EINVAL;
2306 	}
2307 	return ret;
2308 }
2309 
2310 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2311 		void __user *user, int *len)
2312 {
2313 	int ret;
2314 	struct compat_ebt_replace tmp;
2315 	struct ebt_table *t;
2316 	struct net *net = sock_net(sk);
2317 
2318 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2319 		return -EPERM;
2320 
2321 	/* try real handler in case userland supplied needed padding */
2322 	if ((cmd == EBT_SO_GET_INFO ||
2323 	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2324 		return do_ebt_get_ctl(sk, cmd, user, len);
2325 
2326 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2327 		return -EFAULT;
2328 
2329 	t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
2330 	if (!t)
2331 		return ret;
2332 
2333 	xt_compat_lock(NFPROTO_BRIDGE);
2334 	switch (cmd) {
2335 	case EBT_SO_GET_INFO:
2336 		tmp.nentries = t->private->nentries;
2337 		ret = compat_table_info(t->private, &tmp);
2338 		if (ret)
2339 			goto out;
2340 		tmp.valid_hooks = t->valid_hooks;
2341 
2342 		if (copy_to_user(user, &tmp, *len) != 0) {
2343 			ret = -EFAULT;
2344 			break;
2345 		}
2346 		ret = 0;
2347 		break;
2348 	case EBT_SO_GET_INIT_INFO:
2349 		tmp.nentries = t->table->nentries;
2350 		tmp.entries_size = t->table->entries_size;
2351 		tmp.valid_hooks = t->table->valid_hooks;
2352 
2353 		if (copy_to_user(user, &tmp, *len) != 0) {
2354 			ret = -EFAULT;
2355 			break;
2356 		}
2357 		ret = 0;
2358 		break;
2359 	case EBT_SO_GET_ENTRIES:
2360 	case EBT_SO_GET_INIT_ENTRIES:
2361 		/*
2362 		 * try real handler first in case of userland-side padding.
2363 		 * in case we are dealing with an 'ordinary' 32 bit binary
2364 		 * without 64bit compatibility padding, this will fail right
2365 		 * after copy_from_user when the *len argument is validated.
2366 		 *
2367 		 * the compat_ variant needs to do one pass over the kernel
2368 		 * data set to adjust for size differences before it can do the check.
2369 		 */
2370 		if (copy_everything_to_user(t, user, len, cmd) == 0)
2371 			ret = 0;
2372 		else
2373 			ret = compat_copy_everything_to_user(t, user, len, cmd);
2374 		break;
2375 	default:
2376 		ret = -EINVAL;
2377 	}
2378  out:
2379 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2380 	xt_compat_unlock(NFPROTO_BRIDGE);
2381 	mutex_unlock(&ebt_mutex);
2382 	return ret;
2383 }
2384 #endif
2385 
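/*
 * Socket option glue.  Note the PF_INET here: the ebtables userspace tool
 * has traditionally issued its get/setsockopt() calls on an AF_INET raw
 * socket rather than an AF_BRIDGE one.
 */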
2386 static struct nf_sockopt_ops ebt_sockopts = {
2387 	.pf		= PF_INET,
2388 	.set_optmin	= EBT_BASE_CTL,
2389 	.set_optmax	= EBT_SO_SET_MAX + 1,
2390 	.set		= do_ebt_set_ctl,
2391 #ifdef CONFIG_COMPAT
2392 	.compat_set	= compat_do_ebt_set_ctl,
2393 #endif
2394 	.get_optmin	= EBT_BASE_CTL,
2395 	.get_optmax	= EBT_SO_GET_MAX + 1,
2396 	.get		= do_ebt_get_ctl,
2397 #ifdef CONFIG_COMPAT
2398 	.compat_get	= compat_do_ebt_get_ctl,
2399 #endif
2400 	.owner		= THIS_MODULE,
2401 };
2402 
2403 static int __init ebtables_init(void)
2404 {
2405 	int ret;
2406 
2407 	ret = xt_register_target(&ebt_standard_target);
2408 	if (ret < 0)
2409 		return ret;
2410 	ret = nf_register_sockopt(&ebt_sockopts);
2411 	if (ret < 0) {
2412 		xt_unregister_target(&ebt_standard_target);
2413 		return ret;
2414 	}
2415 
2416 	printk(KERN_INFO "Ebtables v2.0 registered\n");
2417 	return 0;
2418 }
2419 
2420 static void __exit ebtables_fini(void)
2421 {
2422 	nf_unregister_sockopt(&ebt_sockopts);
2423 	xt_unregister_target(&ebt_standard_target);
2424 	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2425 }
2426 
2427 EXPORT_SYMBOL(ebt_register_table);
2428 EXPORT_SYMBOL(ebt_unregister_table);
2429 EXPORT_SYMBOL(ebt_do_table);
2430 module_init(ebtables_init);
2431 module_exit(ebtables_fini);
2432 MODULE_LICENSE("GPL");
2433