1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  * Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 #include <linux/cache.h>
14 #include <linux/capability.h>
15 #include <linux/skbuff.h>
16 #include <linux/kmod.h>
17 #include <linux/vmalloc.h>
18 #include <linux/netdevice.h>
19 #include <linux/module.h>
20 #include <linux/icmp.h>
21 #include <net/ip.h>
22 #include <net/compat.h>
23 #include <linux/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28 
29 #include <linux/netfilter/x_tables.h>
30 #include <linux/netfilter_ipv4/ip_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33 
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv4 packet filter");
37 
38 void *ipt_alloc_initial_table(const struct xt_table *info)
39 {
40 	return xt_alloc_initial_table(ipt, IPT);
41 }
42 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
43 
44 /* Returns whether matches rule or not. */
45 /* Performance critical - called for every packet */
46 static inline bool
47 ip_packet_match(const struct iphdr *ip,
48 		const char *indev,
49 		const char *outdev,
50 		const struct ipt_ip *ipinfo,
51 		int isfrag)
52 {
53 	unsigned long ret;
54 
55 	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
56 		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
57 	    NF_INVF(ipinfo, IPT_INV_DSTIP,
58 		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
59 		return false;
60 
61 	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
62 
63 	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
64 		return false;
65 
66 	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);
67 
68 	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
69 		return false;
70 
71 	/* Check specific protocol */
72 	if (ipinfo->proto &&
73 	    NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
74 		return false;
75 
76 	/* If we have a fragment rule but the packet is not a fragment
77 	 * then we return zero */
78 	if (NF_INVF(ipinfo, IPT_INV_FRAG,
79 		    (ipinfo->flags & IPT_F_FRAG) && !isfrag))
80 		return false;
81 
82 	return true;
83 }
84 
85 static bool
86 ip_checkentry(const struct ipt_ip *ip)
87 {
88 	if (ip->flags & ~IPT_F_MASK)
89 		return false;
90 	if (ip->invflags & ~IPT_INV_MASK)
91 		return false;
92 	return true;
93 }
94 
95 static unsigned int
96 ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
97 {
98 	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
99 
100 	return NF_DROP;
101 }
102 
103 /* Performance critical */
104 static inline struct ipt_entry *
105 get_entry(const void *base, unsigned int offset)
106 {
107 	return (struct ipt_entry *)(base + offset);
108 }
109 
110 /* All zeroes == unconditional rule. */
111 /* Mildly perf critical (only if packet tracing is on) */
112 static inline bool unconditional(const struct ipt_entry *e)
113 {
114 	static const struct ipt_ip uncond;
115 
116 	return e->target_offset == sizeof(struct ipt_entry) &&
117 	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
118 }
119 
120 /* for const-correctness */
121 static inline const struct xt_entry_target *
122 ipt_get_target_c(const struct ipt_entry *e)
123 {
124 	return ipt_get_target((struct ipt_entry *)e);
125 }
126 
127 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
128 static const char *const hooknames[] = {
129 	[NF_INET_PRE_ROUTING]		= "PREROUTING",
130 	[NF_INET_LOCAL_IN]		= "INPUT",
131 	[NF_INET_FORWARD]		= "FORWARD",
132 	[NF_INET_LOCAL_OUT]		= "OUTPUT",
133 	[NF_INET_POST_ROUTING]		= "POSTROUTING",
134 };
135 
136 enum nf_ip_trace_comments {
137 	NF_IP_TRACE_COMMENT_RULE,
138 	NF_IP_TRACE_COMMENT_RETURN,
139 	NF_IP_TRACE_COMMENT_POLICY,
140 };
141 
142 static const char *const comments[] = {
143 	[NF_IP_TRACE_COMMENT_RULE]	= "rule",
144 	[NF_IP_TRACE_COMMENT_RETURN]	= "return",
145 	[NF_IP_TRACE_COMMENT_POLICY]	= "policy",
146 };
147 
148 static const struct nf_loginfo trace_loginfo = {
149 	.type = NF_LOG_TYPE_LOG,
150 	.u = {
151 		.log = {
152 			.level = 4,
153 			.logflags = NF_LOG_DEFAULT_MASK,
154 		},
155 	},
156 };
157 
158 /* Mildly perf critical (only if packet tracing is on) */
159 static inline int
160 get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
161 		      const char *hookname, const char **chainname,
162 		      const char **comment, unsigned int *rulenum)
163 {
164 	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);
165 
166 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
167 		/* Head of user chain: ERROR target with chainname */
168 		*chainname = t->target.data;
169 		(*rulenum) = 0;
170 	} else if (s == e) {
171 		(*rulenum)++;
172 
173 		if (unconditional(s) &&
174 		    strcmp(t->target.u.kernel.target->name,
175 			   XT_STANDARD_TARGET) == 0 &&
176 		   t->verdict < 0) {
177 			/* Tail of chains: STANDARD target (return/policy) */
178 			*comment = *chainname == hookname
179 				? comments[NF_IP_TRACE_COMMENT_POLICY]
180 				: comments[NF_IP_TRACE_COMMENT_RETURN];
181 		}
182 		return 1;
183 	} else
184 		(*rulenum)++;
185 
186 	return 0;
187 }
188 
189 static void trace_packet(struct net *net,
190 			 const struct sk_buff *skb,
191 			 unsigned int hook,
192 			 const struct net_device *in,
193 			 const struct net_device *out,
194 			 const char *tablename,
195 			 const struct xt_table_info *private,
196 			 const struct ipt_entry *e)
197 {
198 	const struct ipt_entry *root;
199 	const char *hookname, *chainname, *comment;
200 	const struct ipt_entry *iter;
201 	unsigned int rulenum = 0;
202 
203 	root = get_entry(private->entries, private->hook_entry[hook]);
204 
205 	hookname = chainname = hooknames[hook];
206 	comment = comments[NF_IP_TRACE_COMMENT_RULE];
207 
208 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
209 		if (get_chainname_rulenum(iter, e, hookname,
210 		    &chainname, &comment, &rulenum) != 0)
211 			break;
212 
213 	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
214 		     "TRACE: %s:%s:%s:%u ",
215 		     tablename, chainname, comment, rulenum);
216 }
217 #endif
218 
219 static inline
220 struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
221 {
222 	return (void *)entry + entry->next_offset;
223 }
224 
225 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
226 unsigned int
227 ipt_do_table(struct sk_buff *skb,
228 	     const struct nf_hook_state *state,
229 	     struct xt_table *table)
230 {
231 	unsigned int hook = state->hook;
232 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
233 	const struct iphdr *ip;
234 	/* Initializing verdict to NF_DROP keeps gcc happy. */
235 	unsigned int verdict = NF_DROP;
236 	const char *indev, *outdev;
237 	const void *table_base;
238 	struct ipt_entry *e, **jumpstack;
239 	unsigned int stackidx, cpu;
240 	const struct xt_table_info *private;
241 	struct xt_action_param acpar;
242 	unsigned int addend;
243 
244 	/* Initialization */
245 	stackidx = 0;
246 	ip = ip_hdr(skb);
247 	indev = state->in ? state->in->name : nulldevname;
248 	outdev = state->out ? state->out->name : nulldevname;
249 	/* We handle fragments by dealing with the first fragment as
250 	 * if it was a normal packet.  All other fragments are treated
251 	 * normally, except that they will NEVER match rules that ask
252 	 * things we don't know, ie. tcp syn flag or ports).  If the
253 	 * rule is also a fragment-specific rule, non-fragments won't
254 	 * match it. */
255 	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
256 	acpar.thoff   = ip_hdrlen(skb);
257 	acpar.hotdrop = false;
258 	acpar.state   = state;
259 
260 	WARN_ON(!(table->valid_hooks & (1 << hook)));
261 	local_bh_disable();
262 	addend = xt_write_recseq_begin();
263 	private = table->private;
264 	cpu        = smp_processor_id();
265 	/*
266 	 * Ensure we load private-> members after we've fetched the base
267 	 * pointer.
268 	 */
269 	smp_read_barrier_depends();
270 	table_base = private->entries;
271 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
272 
273 	/* Switch to alternate jumpstack if we're being invoked via TEE.
274 	 * TEE issues XT_CONTINUE verdict on original skb so we must not
275 	 * clobber the jumpstack.
276 	 *
277 	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
278 	 * but it is no problem since absolute verdict is issued by these.
279 	 */
280 	if (static_key_false(&xt_tee_enabled))
281 		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
282 
283 	e = get_entry(table_base, private->hook_entry[hook]);
284 
285 	do {
286 		const struct xt_entry_target *t;
287 		const struct xt_entry_match *ematch;
288 		struct xt_counters *counter;
289 
290 		WARN_ON(!e);
291 		if (!ip_packet_match(ip, indev, outdev,
292 		    &e->ip, acpar.fragoff)) {
293  no_match:
294 			e = ipt_next_entry(e);
295 			continue;
296 		}
297 
298 		xt_ematch_foreach(ematch, e) {
299 			acpar.match     = ematch->u.kernel.match;
300 			acpar.matchinfo = ematch->data;
301 			if (!acpar.match->match(skb, &acpar))
302 				goto no_match;
303 		}
304 
305 		counter = xt_get_this_cpu_counter(&e->counters);
306 		ADD_COUNTER(*counter, skb->len, 1);
307 
308 		t = ipt_get_target(e);
309 		WARN_ON(!t->u.kernel.target);
310 
311 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
312 		/* The packet is traced: log it */
313 		if (unlikely(skb->nf_trace))
314 			trace_packet(state->net, skb, hook, state->in,
315 				     state->out, table->name, private, e);
316 #endif
317 		/* Standard target? */
318 		if (!t->u.kernel.target->target) {
319 			int v;
320 
321 			v = ((struct xt_standard_target *)t)->verdict;
322 			if (v < 0) {
323 				/* Pop from stack? */
324 				if (v != XT_RETURN) {
325 					verdict = (unsigned int)(-v) - 1;
326 					break;
327 				}
328 				if (stackidx == 0) {
329 					e = get_entry(table_base,
330 					    private->underflow[hook]);
331 				} else {
332 					e = jumpstack[--stackidx];
333 					e = ipt_next_entry(e);
334 				}
335 				continue;
336 			}
337 			if (table_base + v != ipt_next_entry(e) &&
338 			    !(e->ip.flags & IPT_F_GOTO)) {
339 				if (unlikely(stackidx >= private->stacksize)) {
340 					verdict = NF_DROP;
341 					break;
342 				}
343 				jumpstack[stackidx++] = e;
344 			}
345 
346 			e = get_entry(table_base, v);
347 			continue;
348 		}
349 
350 		acpar.target   = t->u.kernel.target;
351 		acpar.targinfo = t->data;
352 
353 		verdict = t->u.kernel.target->target(skb, &acpar);
354 		if (verdict == XT_CONTINUE) {
355 			/* Target might have changed stuff. */
356 			ip = ip_hdr(skb);
357 			e = ipt_next_entry(e);
358 		} else {
359 			/* Verdict */
360 			break;
361 		}
362 	} while (!acpar.hotdrop);
363 
364 	xt_write_recseq_end(addend);
365 	local_bh_enable();
366 
367 	if (acpar.hotdrop)
368 		return NF_DROP;
369 	return verdict;
370 }
371 
372 /* Figures out from what hook each rule can be called: returns 0 if
373    there are loops.  Puts hook bitmask in comefrom. */
374 static int
375 mark_source_chains(const struct xt_table_info *newinfo,
376 		   unsigned int valid_hooks, void *entry0,
377 		   unsigned int *offsets)
378 {
379 	unsigned int hook;
380 
381 	/* No recursion; use packet counter to save back ptrs (reset
382 	   to 0 as we leave), and comefrom to save source hook bitmask */
383 	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
384 		unsigned int pos = newinfo->hook_entry[hook];
385 		struct ipt_entry *e = entry0 + pos;
386 
387 		if (!(valid_hooks & (1 << hook)))
388 			continue;
389 
390 		/* Set initial back pointer. */
391 		e->counters.pcnt = pos;
392 
393 		for (;;) {
394 			const struct xt_standard_target *t
395 				= (void *)ipt_get_target_c(e);
396 			int visited = e->comefrom & (1 << hook);
397 
398 			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
399 				return 0;
400 
401 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
402 
403 			/* Unconditional return/END. */
404 			if ((unconditional(e) &&
405 			     (strcmp(t->target.u.user.name,
406 				     XT_STANDARD_TARGET) == 0) &&
407 			     t->verdict < 0) || visited) {
408 				unsigned int oldpos, size;
409 
410 				if ((strcmp(t->target.u.user.name,
411 					    XT_STANDARD_TARGET) == 0) &&
412 				    t->verdict < -NF_MAX_VERDICT - 1)
413 					return 0;
414 
415 				/* Return: backtrack through the last
416 				   big jump. */
417 				do {
418 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
419 					oldpos = pos;
420 					pos = e->counters.pcnt;
421 					e->counters.pcnt = 0;
422 
423 					/* We're at the start. */
424 					if (pos == oldpos)
425 						goto next;
426 
427 					e = entry0 + pos;
428 				} while (oldpos == pos + e->next_offset);
429 
430 				/* Move along one */
431 				size = e->next_offset;
432 				e = entry0 + pos + size;
433 				if (pos + size >= newinfo->size)
434 					return 0;
435 				e->counters.pcnt = pos;
436 				pos += size;
437 			} else {
438 				int newpos = t->verdict;
439 
440 				if (strcmp(t->target.u.user.name,
441 					   XT_STANDARD_TARGET) == 0 &&
442 				    newpos >= 0) {
443 					/* This a jump; chase it. */
444 					if (!xt_find_jump_offset(offsets, newpos,
445 								 newinfo->number))
446 						return 0;
447 					e = entry0 + newpos;
448 				} else {
449 					/* ... this is a fallthru */
450 					newpos = pos + e->next_offset;
451 					if (newpos >= newinfo->size)
452 						return 0;
453 				}
454 				e = entry0 + newpos;
455 				e->counters.pcnt = pos;
456 				pos = newpos;
457 			}
458 		}
459 next:		;
460 	}
461 	return 1;
462 }
463 
464 static void cleanup_match(struct xt_entry_match *m, struct net *net)
465 {
466 	struct xt_mtdtor_param par;
467 
468 	par.net       = net;
469 	par.match     = m->u.kernel.match;
470 	par.matchinfo = m->data;
471 	par.family    = NFPROTO_IPV4;
472 	if (par.match->destroy != NULL)
473 		par.match->destroy(&par);
474 	module_put(par.match->me);
475 }
476 
477 static int
478 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
479 {
480 	const struct ipt_ip *ip = par->entryinfo;
481 
482 	par->match     = m->u.kernel.match;
483 	par->matchinfo = m->data;
484 
485 	return xt_check_match(par, m->u.match_size - sizeof(*m),
486 			      ip->proto, ip->invflags & IPT_INV_PROTO);
487 }
488 
489 static int
490 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
491 {
492 	struct xt_match *match;
493 	int ret;
494 
495 	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
496 				      m->u.user.revision);
497 	if (IS_ERR(match))
498 		return PTR_ERR(match);
499 	m->u.kernel.match = match;
500 
501 	ret = check_match(m, par);
502 	if (ret)
503 		goto err;
504 
505 	return 0;
506 err:
507 	module_put(m->u.kernel.match->me);
508 	return ret;
509 }
510 
511 static int check_target(struct ipt_entry *e, struct net *net, const char *name)
512 {
513 	struct xt_entry_target *t = ipt_get_target(e);
514 	struct xt_tgchk_param par = {
515 		.net       = net,
516 		.table     = name,
517 		.entryinfo = e,
518 		.target    = t->u.kernel.target,
519 		.targinfo  = t->data,
520 		.hook_mask = e->comefrom,
521 		.family    = NFPROTO_IPV4,
522 	};
523 
524 	return xt_check_target(&par, t->u.target_size - sizeof(*t),
525 			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
526 }
527 
528 static int
529 find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
530 		 unsigned int size,
531 		 struct xt_percpu_counter_alloc_state *alloc_state)
532 {
533 	struct xt_entry_target *t;
534 	struct xt_target *target;
535 	int ret;
536 	unsigned int j;
537 	struct xt_mtchk_param mtpar;
538 	struct xt_entry_match *ematch;
539 
540 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
541 		return -ENOMEM;
542 
543 	j = 0;
544 	memset(&mtpar, 0, sizeof(mtpar));
545 	mtpar.net	= net;
546 	mtpar.table     = name;
547 	mtpar.entryinfo = &e->ip;
548 	mtpar.hook_mask = e->comefrom;
549 	mtpar.family    = NFPROTO_IPV4;
550 	xt_ematch_foreach(ematch, e) {
551 		ret = find_check_match(ematch, &mtpar);
552 		if (ret != 0)
553 			goto cleanup_matches;
554 		++j;
555 	}
556 
557 	t = ipt_get_target(e);
558 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
559 					t->u.user.revision);
560 	if (IS_ERR(target)) {
561 		ret = PTR_ERR(target);
562 		goto cleanup_matches;
563 	}
564 	t->u.kernel.target = target;
565 
566 	ret = check_target(e, net, name);
567 	if (ret)
568 		goto err;
569 
570 	return 0;
571  err:
572 	module_put(t->u.kernel.target->me);
573  cleanup_matches:
574 	xt_ematch_foreach(ematch, e) {
575 		if (j-- == 0)
576 			break;
577 		cleanup_match(ematch, net);
578 	}
579 
580 	xt_percpu_counter_free(&e->counters);
581 
582 	return ret;
583 }
584 
585 static bool check_underflow(const struct ipt_entry *e)
586 {
587 	const struct xt_entry_target *t;
588 	unsigned int verdict;
589 
590 	if (!unconditional(e))
591 		return false;
592 	t = ipt_get_target_c(e);
593 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
594 		return false;
595 	verdict = ((struct xt_standard_target *)t)->verdict;
596 	verdict = -verdict - 1;
597 	return verdict == NF_DROP || verdict == NF_ACCEPT;
598 }
599 
600 static int
601 check_entry_size_and_hooks(struct ipt_entry *e,
602 			   struct xt_table_info *newinfo,
603 			   const unsigned char *base,
604 			   const unsigned char *limit,
605 			   const unsigned int *hook_entries,
606 			   const unsigned int *underflows,
607 			   unsigned int valid_hooks)
608 {
609 	unsigned int h;
610 	int err;
611 
612 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
613 	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
614 	    (unsigned char *)e + e->next_offset > limit)
615 		return -EINVAL;
616 
617 	if (e->next_offset
618 	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
619 		return -EINVAL;
620 
621 	if (!ip_checkentry(&e->ip))
622 		return -EINVAL;
623 
624 	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
625 				     e->next_offset);
626 	if (err)
627 		return err;
628 
629 	/* Check hooks & underflows */
630 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
631 		if (!(valid_hooks & (1 << h)))
632 			continue;
633 		if ((unsigned char *)e - base == hook_entries[h])
634 			newinfo->hook_entry[h] = hook_entries[h];
635 		if ((unsigned char *)e - base == underflows[h]) {
636 			if (!check_underflow(e))
637 				return -EINVAL;
638 
639 			newinfo->underflow[h] = underflows[h];
640 		}
641 	}
642 
643 	/* Clear counters and comefrom */
644 	e->counters = ((struct xt_counters) { 0, 0 });
645 	e->comefrom = 0;
646 	return 0;
647 }
648 
649 static void
650 cleanup_entry(struct ipt_entry *e, struct net *net)
651 {
652 	struct xt_tgdtor_param par;
653 	struct xt_entry_target *t;
654 	struct xt_entry_match *ematch;
655 
656 	/* Cleanup all matches */
657 	xt_ematch_foreach(ematch, e)
658 		cleanup_match(ematch, net);
659 	t = ipt_get_target(e);
660 
661 	par.net      = net;
662 	par.target   = t->u.kernel.target;
663 	par.targinfo = t->data;
664 	par.family   = NFPROTO_IPV4;
665 	if (par.target->destroy != NULL)
666 		par.target->destroy(&par);
667 	module_put(par.target->me);
668 	xt_percpu_counter_free(&e->counters);
669 }
670 
671 /* Checks and translates the user-supplied table segment (held in
672    newinfo) */
673 static int
674 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
675 		const struct ipt_replace *repl)
676 {
677 	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
678 	struct ipt_entry *iter;
679 	unsigned int *offsets;
680 	unsigned int i;
681 	int ret = 0;
682 
683 	newinfo->size = repl->size;
684 	newinfo->number = repl->num_entries;
685 
686 	/* Init all hooks to impossible value. */
687 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
688 		newinfo->hook_entry[i] = 0xFFFFFFFF;
689 		newinfo->underflow[i] = 0xFFFFFFFF;
690 	}
691 
692 	offsets = xt_alloc_entry_offsets(newinfo->number);
693 	if (!offsets)
694 		return -ENOMEM;
695 	i = 0;
696 	/* Walk through entries, checking offsets. */
697 	xt_entry_foreach(iter, entry0, newinfo->size) {
698 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
699 						 entry0 + repl->size,
700 						 repl->hook_entry,
701 						 repl->underflow,
702 						 repl->valid_hooks);
703 		if (ret != 0)
704 			goto out_free;
705 		if (i < repl->num_entries)
706 			offsets[i] = (void *)iter - entry0;
707 		++i;
708 		if (strcmp(ipt_get_target(iter)->u.user.name,
709 		    XT_ERROR_TARGET) == 0)
710 			++newinfo->stacksize;
711 	}
712 
713 	ret = -EINVAL;
714 	if (i != repl->num_entries)
715 		goto out_free;
716 
717 	/* Check hooks all assigned */
718 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
719 		/* Only hooks which are valid */
720 		if (!(repl->valid_hooks & (1 << i)))
721 			continue;
722 		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
723 			goto out_free;
724 		if (newinfo->underflow[i] == 0xFFFFFFFF)
725 			goto out_free;
726 	}
727 
728 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
729 		ret = -ELOOP;
730 		goto out_free;
731 	}
732 	kvfree(offsets);
733 
734 	/* Finally, each sanity check must pass */
735 	i = 0;
736 	xt_entry_foreach(iter, entry0, newinfo->size) {
737 		ret = find_check_entry(iter, net, repl->name, repl->size,
738 				       &alloc_state);
739 		if (ret != 0)
740 			break;
741 		++i;
742 	}
743 
744 	if (ret != 0) {
745 		xt_entry_foreach(iter, entry0, newinfo->size) {
746 			if (i-- == 0)
747 				break;
748 			cleanup_entry(iter, net);
749 		}
750 		return ret;
751 	}
752 
753 	return ret;
754  out_free:
755 	kvfree(offsets);
756 	return ret;
757 }
758 
759 static void
760 get_counters(const struct xt_table_info *t,
761 	     struct xt_counters counters[])
762 {
763 	struct ipt_entry *iter;
764 	unsigned int cpu;
765 	unsigned int i;
766 
767 	for_each_possible_cpu(cpu) {
768 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
769 
770 		i = 0;
771 		xt_entry_foreach(iter, t->entries, t->size) {
772 			struct xt_counters *tmp;
773 			u64 bcnt, pcnt;
774 			unsigned int start;
775 
776 			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
777 			do {
778 				start = read_seqcount_begin(s);
779 				bcnt = tmp->bcnt;
780 				pcnt = tmp->pcnt;
781 			} while (read_seqcount_retry(s, start));
782 
783 			ADD_COUNTER(counters[i], bcnt, pcnt);
784 			++i; /* macro does multi eval of i */
785 			cond_resched();
786 		}
787 	}
788 }
789 
790 static struct xt_counters *alloc_counters(const struct xt_table *table)
791 {
792 	unsigned int countersize;
793 	struct xt_counters *counters;
794 	const struct xt_table_info *private = table->private;
795 
796 	/* We need atomic snapshot of counters: rest doesn't change
797 	   (other than comefrom, which userspace doesn't care
798 	   about). */
799 	countersize = sizeof(struct xt_counters) * private->number;
800 	counters = vzalloc(countersize);
801 
802 	if (counters == NULL)
803 		return ERR_PTR(-ENOMEM);
804 
805 	get_counters(private, counters);
806 
807 	return counters;
808 }
809 
810 static int
811 copy_entries_to_user(unsigned int total_size,
812 		     const struct xt_table *table,
813 		     void __user *userptr)
814 {
815 	unsigned int off, num;
816 	const struct ipt_entry *e;
817 	struct xt_counters *counters;
818 	const struct xt_table_info *private = table->private;
819 	int ret = 0;
820 	const void *loc_cpu_entry;
821 
822 	counters = alloc_counters(table);
823 	if (IS_ERR(counters))
824 		return PTR_ERR(counters);
825 
826 	loc_cpu_entry = private->entries;
827 
828 	/* FIXME: use iterator macros --RR */
829 	/* ... then go back and fix counters and names */
830 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
831 		unsigned int i;
832 		const struct xt_entry_match *m;
833 		const struct xt_entry_target *t;
834 
835 		e = loc_cpu_entry + off;
836 		if (copy_to_user(userptr + off, e, sizeof(*e))) {
837 			ret = -EFAULT;
838 			goto free_counters;
839 		}
840 		if (copy_to_user(userptr + off
841 				 + offsetof(struct ipt_entry, counters),
842 				 &counters[num],
843 				 sizeof(counters[num])) != 0) {
844 			ret = -EFAULT;
845 			goto free_counters;
846 		}
847 
848 		for (i = sizeof(struct ipt_entry);
849 		     i < e->target_offset;
850 		     i += m->u.match_size) {
851 			m = (void *)e + i;
852 
853 			if (xt_match_to_user(m, userptr + off + i)) {
854 				ret = -EFAULT;
855 				goto free_counters;
856 			}
857 		}
858 
859 		t = ipt_get_target_c(e);
860 		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
861 			ret = -EFAULT;
862 			goto free_counters;
863 		}
864 	}
865 
866  free_counters:
867 	vfree(counters);
868 	return ret;
869 }
870 
871 #ifdef CONFIG_COMPAT
872 static void compat_standard_from_user(void *dst, const void *src)
873 {
874 	int v = *(compat_int_t *)src;
875 
876 	if (v > 0)
877 		v += xt_compat_calc_jump(AF_INET, v);
878 	memcpy(dst, &v, sizeof(v));
879 }
880 
881 static int compat_standard_to_user(void __user *dst, const void *src)
882 {
883 	compat_int_t cv = *(int *)src;
884 
885 	if (cv > 0)
886 		cv -= xt_compat_calc_jump(AF_INET, cv);
887 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
888 }
889 
890 static int compat_calc_entry(const struct ipt_entry *e,
891 			     const struct xt_table_info *info,
892 			     const void *base, struct xt_table_info *newinfo)
893 {
894 	const struct xt_entry_match *ematch;
895 	const struct xt_entry_target *t;
896 	unsigned int entry_offset;
897 	int off, i, ret;
898 
899 	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
900 	entry_offset = (void *)e - base;
901 	xt_ematch_foreach(ematch, e)
902 		off += xt_compat_match_offset(ematch->u.kernel.match);
903 	t = ipt_get_target_c(e);
904 	off += xt_compat_target_offset(t->u.kernel.target);
905 	newinfo->size -= off;
906 	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
907 	if (ret)
908 		return ret;
909 
910 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
911 		if (info->hook_entry[i] &&
912 		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
913 			newinfo->hook_entry[i] -= off;
914 		if (info->underflow[i] &&
915 		    (e < (struct ipt_entry *)(base + info->underflow[i])))
916 			newinfo->underflow[i] -= off;
917 	}
918 	return 0;
919 }
920 
921 static int compat_table_info(const struct xt_table_info *info,
922 			     struct xt_table_info *newinfo)
923 {
924 	struct ipt_entry *iter;
925 	const void *loc_cpu_entry;
926 	int ret;
927 
928 	if (!newinfo || !info)
929 		return -EINVAL;
930 
931 	/* we dont care about newinfo->entries */
932 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
933 	newinfo->initial_entries = 0;
934 	loc_cpu_entry = info->entries;
935 	ret = xt_compat_init_offsets(AF_INET, info->number);
936 	if (ret)
937 		return ret;
938 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
939 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
940 		if (ret != 0)
941 			return ret;
942 	}
943 	return 0;
944 }
945 #endif
946 
947 static int get_info(struct net *net, void __user *user,
948 		    const int *len, int compat)
949 {
950 	char name[XT_TABLE_MAXNAMELEN];
951 	struct xt_table *t;
952 	int ret;
953 
954 	if (*len != sizeof(struct ipt_getinfo))
955 		return -EINVAL;
956 
957 	if (copy_from_user(name, user, sizeof(name)) != 0)
958 		return -EFAULT;
959 
960 	name[XT_TABLE_MAXNAMELEN-1] = '\0';
961 #ifdef CONFIG_COMPAT
962 	if (compat)
963 		xt_compat_lock(AF_INET);
964 #endif
965 	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
966 				    "iptable_%s", name);
967 	if (t) {
968 		struct ipt_getinfo info;
969 		const struct xt_table_info *private = t->private;
970 #ifdef CONFIG_COMPAT
971 		struct xt_table_info tmp;
972 
973 		if (compat) {
974 			ret = compat_table_info(private, &tmp);
975 			xt_compat_flush_offsets(AF_INET);
976 			private = &tmp;
977 		}
978 #endif
979 		memset(&info, 0, sizeof(info));
980 		info.valid_hooks = t->valid_hooks;
981 		memcpy(info.hook_entry, private->hook_entry,
982 		       sizeof(info.hook_entry));
983 		memcpy(info.underflow, private->underflow,
984 		       sizeof(info.underflow));
985 		info.num_entries = private->number;
986 		info.size = private->size;
987 		strcpy(info.name, name);
988 
989 		if (copy_to_user(user, &info, *len) != 0)
990 			ret = -EFAULT;
991 		else
992 			ret = 0;
993 
994 		xt_table_unlock(t);
995 		module_put(t->me);
996 	} else
997 		ret = -ENOENT;
998 #ifdef CONFIG_COMPAT
999 	if (compat)
1000 		xt_compat_unlock(AF_INET);
1001 #endif
1002 	return ret;
1003 }
1004 
1005 static int
1006 get_entries(struct net *net, struct ipt_get_entries __user *uptr,
1007 	    const int *len)
1008 {
1009 	int ret;
1010 	struct ipt_get_entries get;
1011 	struct xt_table *t;
1012 
1013 	if (*len < sizeof(get))
1014 		return -EINVAL;
1015 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1016 		return -EFAULT;
1017 	if (*len != sizeof(struct ipt_get_entries) + get.size)
1018 		return -EINVAL;
1019 	get.name[sizeof(get.name) - 1] = '\0';
1020 
1021 	t = xt_find_table_lock(net, AF_INET, get.name);
1022 	if (t) {
1023 		const struct xt_table_info *private = t->private;
1024 		if (get.size == private->size)
1025 			ret = copy_entries_to_user(private->size,
1026 						   t, uptr->entrytable);
1027 		else
1028 			ret = -EAGAIN;
1029 
1030 		module_put(t->me);
1031 		xt_table_unlock(t);
1032 	} else
1033 		ret = -ENOENT;
1034 
1035 	return ret;
1036 }
1037 
1038 static int
1039 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1040 	     struct xt_table_info *newinfo, unsigned int num_counters,
1041 	     void __user *counters_ptr)
1042 {
1043 	int ret;
1044 	struct xt_table *t;
1045 	struct xt_table_info *oldinfo;
1046 	struct xt_counters *counters;
1047 	struct ipt_entry *iter;
1048 
1049 	ret = 0;
1050 	counters = xt_counters_alloc(num_counters);
1051 	if (!counters) {
1052 		ret = -ENOMEM;
1053 		goto out;
1054 	}
1055 
1056 	t = try_then_request_module(xt_find_table_lock(net, AF_INET, name),
1057 				    "iptable_%s", name);
1058 	if (!t) {
1059 		ret = -ENOENT;
1060 		goto free_newinfo_counters_untrans;
1061 	}
1062 
1063 	/* You lied! */
1064 	if (valid_hooks != t->valid_hooks) {
1065 		ret = -EINVAL;
1066 		goto put_module;
1067 	}
1068 
1069 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1070 	if (!oldinfo)
1071 		goto put_module;
1072 
1073 	/* Update module usage count based on number of rules */
1074 	if ((oldinfo->number > oldinfo->initial_entries) ||
1075 	    (newinfo->number <= oldinfo->initial_entries))
1076 		module_put(t->me);
1077 	if ((oldinfo->number > oldinfo->initial_entries) &&
1078 	    (newinfo->number <= oldinfo->initial_entries))
1079 		module_put(t->me);
1080 
1081 	/* Get the old counters, and synchronize with replace */
1082 	get_counters(oldinfo, counters);
1083 
1084 	/* Decrease module usage counts and free resource */
1085 	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1086 		cleanup_entry(iter, net);
1087 
1088 	xt_free_table_info(oldinfo);
1089 	if (copy_to_user(counters_ptr, counters,
1090 			 sizeof(struct xt_counters) * num_counters) != 0) {
1091 		/* Silent error, can't fail, new table is already in place */
1092 		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
1093 	}
1094 	vfree(counters);
1095 	xt_table_unlock(t);
1096 	return ret;
1097 
1098  put_module:
1099 	module_put(t->me);
1100 	xt_table_unlock(t);
1101  free_newinfo_counters_untrans:
1102 	vfree(counters);
1103  out:
1104 	return ret;
1105 }
1106 
1107 static int
1108 do_replace(struct net *net, const void __user *user, unsigned int len)
1109 {
1110 	int ret;
1111 	struct ipt_replace tmp;
1112 	struct xt_table_info *newinfo;
1113 	void *loc_cpu_entry;
1114 	struct ipt_entry *iter;
1115 
1116 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1117 		return -EFAULT;
1118 
1119 	/* overflow check */
1120 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1121 		return -ENOMEM;
1122 	if (tmp.num_counters == 0)
1123 		return -EINVAL;
1124 
1125 	tmp.name[sizeof(tmp.name)-1] = 0;
1126 
1127 	newinfo = xt_alloc_table_info(tmp.size);
1128 	if (!newinfo)
1129 		return -ENOMEM;
1130 
1131 	loc_cpu_entry = newinfo->entries;
1132 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1133 			   tmp.size) != 0) {
1134 		ret = -EFAULT;
1135 		goto free_newinfo;
1136 	}
1137 
1138 	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1139 	if (ret != 0)
1140 		goto free_newinfo;
1141 
1142 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1143 			   tmp.num_counters, tmp.counters);
1144 	if (ret)
1145 		goto free_newinfo_untrans;
1146 	return 0;
1147 
1148  free_newinfo_untrans:
1149 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1150 		cleanup_entry(iter, net);
1151  free_newinfo:
1152 	xt_free_table_info(newinfo);
1153 	return ret;
1154 }
1155 
1156 static int
1157 do_add_counters(struct net *net, const void __user *user,
1158 		unsigned int len, int compat)
1159 {
1160 	unsigned int i;
1161 	struct xt_counters_info tmp;
1162 	struct xt_counters *paddc;
1163 	struct xt_table *t;
1164 	const struct xt_table_info *private;
1165 	int ret = 0;
1166 	struct ipt_entry *iter;
1167 	unsigned int addend;
1168 
1169 	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
1170 	if (IS_ERR(paddc))
1171 		return PTR_ERR(paddc);
1172 
1173 	t = xt_find_table_lock(net, AF_INET, tmp.name);
1174 	if (!t) {
1175 		ret = -ENOENT;
1176 		goto free;
1177 	}
1178 
1179 	local_bh_disable();
1180 	private = t->private;
1181 	if (private->number != tmp.num_counters) {
1182 		ret = -EINVAL;
1183 		goto unlock_up_free;
1184 	}
1185 
1186 	i = 0;
1187 	addend = xt_write_recseq_begin();
1188 	xt_entry_foreach(iter, private->entries, private->size) {
1189 		struct xt_counters *tmp;
1190 
1191 		tmp = xt_get_this_cpu_counter(&iter->counters);
1192 		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1193 		++i;
1194 	}
1195 	xt_write_recseq_end(addend);
1196  unlock_up_free:
1197 	local_bh_enable();
1198 	xt_table_unlock(t);
1199 	module_put(t->me);
1200  free:
1201 	vfree(paddc);
1202 
1203 	return ret;
1204 }
1205 
1206 #ifdef CONFIG_COMPAT
1207 struct compat_ipt_replace {
1208 	char			name[XT_TABLE_MAXNAMELEN];
1209 	u32			valid_hooks;
1210 	u32			num_entries;
1211 	u32			size;
1212 	u32			hook_entry[NF_INET_NUMHOOKS];
1213 	u32			underflow[NF_INET_NUMHOOKS];
1214 	u32			num_counters;
1215 	compat_uptr_t		counters;	/* struct xt_counters * */
1216 	struct compat_ipt_entry	entries[0];
1217 };
1218 
1219 static int
1220 compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
1221 			  unsigned int *size, struct xt_counters *counters,
1222 			  unsigned int i)
1223 {
1224 	struct xt_entry_target *t;
1225 	struct compat_ipt_entry __user *ce;
1226 	u_int16_t target_offset, next_offset;
1227 	compat_uint_t origsize;
1228 	const struct xt_entry_match *ematch;
1229 	int ret = 0;
1230 
1231 	origsize = *size;
1232 	ce = *dstptr;
1233 	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
1234 	    copy_to_user(&ce->counters, &counters[i],
1235 	    sizeof(counters[i])) != 0)
1236 		return -EFAULT;
1237 
1238 	*dstptr += sizeof(struct compat_ipt_entry);
1239 	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1240 
1241 	xt_ematch_foreach(ematch, e) {
1242 		ret = xt_compat_match_to_user(ematch, dstptr, size);
1243 		if (ret != 0)
1244 			return ret;
1245 	}
1246 	target_offset = e->target_offset - (origsize - *size);
1247 	t = ipt_get_target(e);
1248 	ret = xt_compat_target_to_user(t, dstptr, size);
1249 	if (ret)
1250 		return ret;
1251 	next_offset = e->next_offset - (origsize - *size);
1252 	if (put_user(target_offset, &ce->target_offset) != 0 ||
1253 	    put_user(next_offset, &ce->next_offset) != 0)
1254 		return -EFAULT;
1255 	return 0;
1256 }
1257 
1258 static int
1259 compat_find_calc_match(struct xt_entry_match *m,
1260 		       const struct ipt_ip *ip,
1261 		       int *size)
1262 {
1263 	struct xt_match *match;
1264 
1265 	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
1266 				      m->u.user.revision);
1267 	if (IS_ERR(match))
1268 		return PTR_ERR(match);
1269 
1270 	m->u.kernel.match = match;
1271 	*size += xt_compat_match_offset(match);
1272 	return 0;
1273 }
1274 
1275 static void compat_release_entry(struct compat_ipt_entry *e)
1276 {
1277 	struct xt_entry_target *t;
1278 	struct xt_entry_match *ematch;
1279 
1280 	/* Cleanup all matches */
1281 	xt_ematch_foreach(ematch, e)
1282 		module_put(ematch->u.kernel.match->me);
1283 	t = compat_ipt_get_target(e);
1284 	module_put(t->u.kernel.target->me);
1285 }
1286 
1287 static int
1288 check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
1289 				  struct xt_table_info *newinfo,
1290 				  unsigned int *size,
1291 				  const unsigned char *base,
1292 				  const unsigned char *limit)
1293 {
1294 	struct xt_entry_match *ematch;
1295 	struct xt_entry_target *t;
1296 	struct xt_target *target;
1297 	unsigned int entry_offset;
1298 	unsigned int j;
1299 	int ret, off;
1300 
1301 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
1302 	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
1303 	    (unsigned char *)e + e->next_offset > limit)
1304 		return -EINVAL;
1305 
1306 	if (e->next_offset < sizeof(struct compat_ipt_entry) +
1307 			     sizeof(struct compat_xt_entry_target))
1308 		return -EINVAL;
1309 
1310 	if (!ip_checkentry(&e->ip))
1311 		return -EINVAL;
1312 
1313 	ret = xt_compat_check_entry_offsets(e, e->elems,
1314 					    e->target_offset, e->next_offset);
1315 	if (ret)
1316 		return ret;
1317 
1318 	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1319 	entry_offset = (void *)e - (void *)base;
1320 	j = 0;
1321 	xt_ematch_foreach(ematch, e) {
1322 		ret = compat_find_calc_match(ematch, &e->ip, &off);
1323 		if (ret != 0)
1324 			goto release_matches;
1325 		++j;
1326 	}
1327 
1328 	t = compat_ipt_get_target(e);
1329 	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
1330 					t->u.user.revision);
1331 	if (IS_ERR(target)) {
1332 		ret = PTR_ERR(target);
1333 		goto release_matches;
1334 	}
1335 	t->u.kernel.target = target;
1336 
1337 	off += xt_compat_target_offset(target);
1338 	*size += off;
1339 	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
1340 	if (ret)
1341 		goto out;
1342 
1343 	return 0;
1344 
1345 out:
1346 	module_put(t->u.kernel.target->me);
1347 release_matches:
1348 	xt_ematch_foreach(ematch, e) {
1349 		if (j-- == 0)
1350 			break;
1351 		module_put(ematch->u.kernel.match->me);
1352 	}
1353 	return ret;
1354 }
1355 
1356 static void
1357 compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
1358 			    unsigned int *size,
1359 			    struct xt_table_info *newinfo, unsigned char *base)
1360 {
1361 	struct xt_entry_target *t;
1362 	struct ipt_entry *de;
1363 	unsigned int origsize;
1364 	int h;
1365 	struct xt_entry_match *ematch;
1366 
1367 	origsize = *size;
1368 	de = *dstptr;
1369 	memcpy(de, e, sizeof(struct ipt_entry));
1370 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1371 
1372 	*dstptr += sizeof(struct ipt_entry);
1373 	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
1374 
1375 	xt_ematch_foreach(ematch, e)
1376 		xt_compat_match_from_user(ematch, dstptr, size);
1377 
1378 	de->target_offset = e->target_offset - (origsize - *size);
1379 	t = compat_ipt_get_target(e);
1380 	xt_compat_target_from_user(t, dstptr, size);
1381 
1382 	de->next_offset = e->next_offset - (origsize - *size);
1383 
1384 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1385 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1386 			newinfo->hook_entry[h] -= origsize - *size;
1387 		if ((unsigned char *)de - base < newinfo->underflow[h])
1388 			newinfo->underflow[h] -= origsize - *size;
1389 	}
1390 }
1391 
1392 static int
1393 translate_compat_table(struct net *net,
1394 		       struct xt_table_info **pinfo,
1395 		       void **pentry0,
1396 		       const struct compat_ipt_replace *compatr)
1397 {
1398 	unsigned int i, j;
1399 	struct xt_table_info *newinfo, *info;
1400 	void *pos, *entry0, *entry1;
1401 	struct compat_ipt_entry *iter0;
1402 	struct ipt_replace repl;
1403 	unsigned int size;
1404 	int ret;
1405 
1406 	info = *pinfo;
1407 	entry0 = *pentry0;
1408 	size = compatr->size;
1409 	info->number = compatr->num_entries;
1410 
1411 	j = 0;
1412 	xt_compat_lock(AF_INET);
1413 	ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
1414 	if (ret)
1415 		goto out_unlock;
1416 	/* Walk through entries, checking offsets. */
1417 	xt_entry_foreach(iter0, entry0, compatr->size) {
1418 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1419 							entry0,
1420 							entry0 + compatr->size);
1421 		if (ret != 0)
1422 			goto out_unlock;
1423 		++j;
1424 	}
1425 
1426 	ret = -EINVAL;
1427 	if (j != compatr->num_entries)
1428 		goto out_unlock;
1429 
1430 	ret = -ENOMEM;
1431 	newinfo = xt_alloc_table_info(size);
1432 	if (!newinfo)
1433 		goto out_unlock;
1434 
1435 	newinfo->number = compatr->num_entries;
1436 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1437 		newinfo->hook_entry[i] = compatr->hook_entry[i];
1438 		newinfo->underflow[i] = compatr->underflow[i];
1439 	}
1440 	entry1 = newinfo->entries;
1441 	pos = entry1;
1442 	size = compatr->size;
1443 	xt_entry_foreach(iter0, entry0, compatr->size)
1444 		compat_copy_entry_from_user(iter0, &pos, &size,
1445 					    newinfo, entry1);
1446 
1447 	/* all module references in entry0 are now gone.
1448 	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
1449 	 * generated by 64bit userspace.
1450 	 *
1451 	 * Call standard translate_table() to validate all hook_entrys,
1452 	 * underflows, check for loops, etc.
1453 	 */
1454 	xt_compat_flush_offsets(AF_INET);
1455 	xt_compat_unlock(AF_INET);
1456 
1457 	memcpy(&repl, compatr, sizeof(*compatr));
1458 
1459 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1460 		repl.hook_entry[i] = newinfo->hook_entry[i];
1461 		repl.underflow[i] = newinfo->underflow[i];
1462 	}
1463 
1464 	repl.num_counters = 0;
1465 	repl.counters = NULL;
1466 	repl.size = newinfo->size;
1467 	ret = translate_table(net, newinfo, entry1, &repl);
1468 	if (ret)
1469 		goto free_newinfo;
1470 
1471 	*pinfo = newinfo;
1472 	*pentry0 = entry1;
1473 	xt_free_table_info(info);
1474 	return 0;
1475 
1476 free_newinfo:
1477 	xt_free_table_info(newinfo);
1478 	return ret;
1479 out_unlock:
1480 	xt_compat_flush_offsets(AF_INET);
1481 	xt_compat_unlock(AF_INET);
1482 	xt_entry_foreach(iter0, entry0, compatr->size) {
1483 		if (j-- == 0)
1484 			break;
1485 		compat_release_entry(iter0);
1486 	}
1487 	return ret;
1488 }
1489 
1490 static int
1491 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1492 {
1493 	int ret;
1494 	struct compat_ipt_replace tmp;
1495 	struct xt_table_info *newinfo;
1496 	void *loc_cpu_entry;
1497 	struct ipt_entry *iter;
1498 
1499 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1500 		return -EFAULT;
1501 
1502 	/* overflow check */
1503 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1504 		return -ENOMEM;
1505 	if (tmp.num_counters == 0)
1506 		return -EINVAL;
1507 
1508 	tmp.name[sizeof(tmp.name)-1] = 0;
1509 
1510 	newinfo = xt_alloc_table_info(tmp.size);
1511 	if (!newinfo)
1512 		return -ENOMEM;
1513 
1514 	loc_cpu_entry = newinfo->entries;
1515 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1516 			   tmp.size) != 0) {
1517 		ret = -EFAULT;
1518 		goto free_newinfo;
1519 	}
1520 
1521 	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1522 	if (ret != 0)
1523 		goto free_newinfo;
1524 
1525 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1526 			   tmp.num_counters, compat_ptr(tmp.counters));
1527 	if (ret)
1528 		goto free_newinfo_untrans;
1529 	return 0;
1530 
1531  free_newinfo_untrans:
1532 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1533 		cleanup_entry(iter, net);
1534  free_newinfo:
1535 	xt_free_table_info(newinfo);
1536 	return ret;
1537 }
1538 
1539 static int
1540 compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
1541 		      unsigned int len)
1542 {
1543 	int ret;
1544 
1545 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1546 		return -EPERM;
1547 
1548 	switch (cmd) {
1549 	case IPT_SO_SET_REPLACE:
1550 		ret = compat_do_replace(sock_net(sk), user, len);
1551 		break;
1552 
1553 	case IPT_SO_SET_ADD_COUNTERS:
1554 		ret = do_add_counters(sock_net(sk), user, len, 1);
1555 		break;
1556 
1557 	default:
1558 		ret = -EINVAL;
1559 	}
1560 
1561 	return ret;
1562 }
1563 
1564 struct compat_ipt_get_entries {
1565 	char name[XT_TABLE_MAXNAMELEN];
1566 	compat_uint_t size;
1567 	struct compat_ipt_entry entrytable[0];
1568 };
1569 
1570 static int
1571 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1572 			    void __user *userptr)
1573 {
1574 	struct xt_counters *counters;
1575 	const struct xt_table_info *private = table->private;
1576 	void __user *pos;
1577 	unsigned int size;
1578 	int ret = 0;
1579 	unsigned int i = 0;
1580 	struct ipt_entry *iter;
1581 
1582 	counters = alloc_counters(table);
1583 	if (IS_ERR(counters))
1584 		return PTR_ERR(counters);
1585 
1586 	pos = userptr;
1587 	size = total_size;
1588 	xt_entry_foreach(iter, private->entries, total_size) {
1589 		ret = compat_copy_entry_to_user(iter, &pos,
1590 						&size, counters, i++);
1591 		if (ret != 0)
1592 			break;
1593 	}
1594 
1595 	vfree(counters);
1596 	return ret;
1597 }
1598 
1599 static int
1600 compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
1601 		   int *len)
1602 {
1603 	int ret;
1604 	struct compat_ipt_get_entries get;
1605 	struct xt_table *t;
1606 
1607 	if (*len < sizeof(get))
1608 		return -EINVAL;
1609 
1610 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1611 		return -EFAULT;
1612 
1613 	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
1614 		return -EINVAL;
1615 
1616 	get.name[sizeof(get.name) - 1] = '\0';
1617 
1618 	xt_compat_lock(AF_INET);
1619 	t = xt_find_table_lock(net, AF_INET, get.name);
1620 	if (t) {
1621 		const struct xt_table_info *private = t->private;
1622 		struct xt_table_info info;
1623 		ret = compat_table_info(private, &info);
1624 		if (!ret && get.size == info.size)
1625 			ret = compat_copy_entries_to_user(private->size,
1626 							  t, uptr->entrytable);
1627 		else if (!ret)
1628 			ret = -EAGAIN;
1629 
1630 		xt_compat_flush_offsets(AF_INET);
1631 		module_put(t->me);
1632 		xt_table_unlock(t);
1633 	} else
1634 		ret = -ENOENT;
1635 
1636 	xt_compat_unlock(AF_INET);
1637 	return ret;
1638 }
1639 
1640 static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
1641 
1642 static int
1643 compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1644 {
1645 	int ret;
1646 
1647 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1648 		return -EPERM;
1649 
1650 	switch (cmd) {
1651 	case IPT_SO_GET_INFO:
1652 		ret = get_info(sock_net(sk), user, len, 1);
1653 		break;
1654 	case IPT_SO_GET_ENTRIES:
1655 		ret = compat_get_entries(sock_net(sk), user, len);
1656 		break;
1657 	default:
1658 		ret = do_ipt_get_ctl(sk, cmd, user, len);
1659 	}
1660 	return ret;
1661 }
1662 #endif
1663 
1664 static int
1665 do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1666 {
1667 	int ret;
1668 
1669 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1670 		return -EPERM;
1671 
1672 	switch (cmd) {
1673 	case IPT_SO_SET_REPLACE:
1674 		ret = do_replace(sock_net(sk), user, len);
1675 		break;
1676 
1677 	case IPT_SO_SET_ADD_COUNTERS:
1678 		ret = do_add_counters(sock_net(sk), user, len, 0);
1679 		break;
1680 
1681 	default:
1682 		ret = -EINVAL;
1683 	}
1684 
1685 	return ret;
1686 }
1687 
1688 static int
1689 do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1690 {
1691 	int ret;
1692 
1693 	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1694 		return -EPERM;
1695 
1696 	switch (cmd) {
1697 	case IPT_SO_GET_INFO:
1698 		ret = get_info(sock_net(sk), user, len, 0);
1699 		break;
1700 
1701 	case IPT_SO_GET_ENTRIES:
1702 		ret = get_entries(sock_net(sk), user, len);
1703 		break;
1704 
1705 	case IPT_SO_GET_REVISION_MATCH:
1706 	case IPT_SO_GET_REVISION_TARGET: {
1707 		struct xt_get_revision rev;
1708 		int target;
1709 
1710 		if (*len != sizeof(rev)) {
1711 			ret = -EINVAL;
1712 			break;
1713 		}
1714 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1715 			ret = -EFAULT;
1716 			break;
1717 		}
1718 		rev.name[sizeof(rev.name)-1] = 0;
1719 
1720 		if (cmd == IPT_SO_GET_REVISION_TARGET)
1721 			target = 1;
1722 		else
1723 			target = 0;
1724 
1725 		try_then_request_module(xt_find_revision(AF_INET, rev.name,
1726 							 rev.revision,
1727 							 target, &ret),
1728 					"ipt_%s", rev.name);
1729 		break;
1730 	}
1731 
1732 	default:
1733 		ret = -EINVAL;
1734 	}
1735 
1736 	return ret;
1737 }
1738 
1739 static void __ipt_unregister_table(struct net *net, struct xt_table *table)
1740 {
1741 	struct xt_table_info *private;
1742 	void *loc_cpu_entry;
1743 	struct module *table_owner = table->me;
1744 	struct ipt_entry *iter;
1745 
1746 	private = xt_unregister_table(table);
1747 
1748 	/* Decrease module usage counts and free resources */
1749 	loc_cpu_entry = private->entries;
1750 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
1751 		cleanup_entry(iter, net);
1752 	if (private->number > private->initial_entries)
1753 		module_put(table_owner);
1754 	xt_free_table_info(private);
1755 }
1756 
1757 int ipt_register_table(struct net *net, const struct xt_table *table,
1758 		       const struct ipt_replace *repl,
1759 		       const struct nf_hook_ops *ops, struct xt_table **res)
1760 {
1761 	int ret;
1762 	struct xt_table_info *newinfo;
1763 	struct xt_table_info bootstrap = {0};
1764 	void *loc_cpu_entry;
1765 	struct xt_table *new_table;
1766 
1767 	newinfo = xt_alloc_table_info(repl->size);
1768 	if (!newinfo)
1769 		return -ENOMEM;
1770 
1771 	loc_cpu_entry = newinfo->entries;
1772 	memcpy(loc_cpu_entry, repl->entries, repl->size);
1773 
1774 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1775 	if (ret != 0)
1776 		goto out_free;
1777 
1778 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
1779 	if (IS_ERR(new_table)) {
1780 		ret = PTR_ERR(new_table);
1781 		goto out_free;
1782 	}
1783 
1784 	/* set res now, will see skbs right after nf_register_net_hooks */
1785 	WRITE_ONCE(*res, new_table);
1786 
1787 	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
1788 	if (ret != 0) {
1789 		__ipt_unregister_table(net, new_table);
1790 		*res = NULL;
1791 	}
1792 
1793 	return ret;
1794 
1795 out_free:
1796 	xt_free_table_info(newinfo);
1797 	return ret;
1798 }
1799 
1800 void ipt_unregister_table(struct net *net, struct xt_table *table,
1801 			  const struct nf_hook_ops *ops)
1802 {
1803 	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
1804 	__ipt_unregister_table(net, table);
1805 }
1806 
1807 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1808 static inline bool
1809 icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1810 		     u_int8_t type, u_int8_t code,
1811 		     bool invert)
1812 {
1813 	return ((test_type == 0xFF) ||
1814 		(type == test_type && code >= min_code && code <= max_code))
1815 		^ invert;
1816 }
1817 
1818 static bool
1819 icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
1820 {
1821 	const struct icmphdr *ic;
1822 	struct icmphdr _icmph;
1823 	const struct ipt_icmp *icmpinfo = par->matchinfo;
1824 
1825 	/* Must not be a fragment. */
1826 	if (par->fragoff != 0)
1827 		return false;
1828 
1829 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
1830 	if (ic == NULL) {
1831 		/* We've been asked to examine this packet, and we
1832 		 * can't.  Hence, no choice but to drop.
1833 		 */
1834 		par->hotdrop = true;
1835 		return false;
1836 	}
1837 
1838 	return icmp_type_code_match(icmpinfo->type,
1839 				    icmpinfo->code[0],
1840 				    icmpinfo->code[1],
1841 				    ic->type, ic->code,
1842 				    !!(icmpinfo->invflags&IPT_ICMP_INV));
1843 }
1844 
1845 static int icmp_checkentry(const struct xt_mtchk_param *par)
1846 {
1847 	const struct ipt_icmp *icmpinfo = par->matchinfo;
1848 
1849 	/* Must specify no unknown invflags */
1850 	return (icmpinfo->invflags & ~IPT_ICMP_INV) ? -EINVAL : 0;
1851 }
1852 
1853 static struct xt_target ipt_builtin_tg[] __read_mostly = {
1854 	{
1855 		.name             = XT_STANDARD_TARGET,
1856 		.targetsize       = sizeof(int),
1857 		.family           = NFPROTO_IPV4,
1858 #ifdef CONFIG_COMPAT
1859 		.compatsize       = sizeof(compat_int_t),
1860 		.compat_from_user = compat_standard_from_user,
1861 		.compat_to_user   = compat_standard_to_user,
1862 #endif
1863 	},
1864 	{
1865 		.name             = XT_ERROR_TARGET,
1866 		.target           = ipt_error,
1867 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
1868 		.family           = NFPROTO_IPV4,
1869 	},
1870 };
1871 
1872 static struct nf_sockopt_ops ipt_sockopts = {
1873 	.pf		= PF_INET,
1874 	.set_optmin	= IPT_BASE_CTL,
1875 	.set_optmax	= IPT_SO_SET_MAX+1,
1876 	.set		= do_ipt_set_ctl,
1877 #ifdef CONFIG_COMPAT
1878 	.compat_set	= compat_do_ipt_set_ctl,
1879 #endif
1880 	.get_optmin	= IPT_BASE_CTL,
1881 	.get_optmax	= IPT_SO_GET_MAX+1,
1882 	.get		= do_ipt_get_ctl,
1883 #ifdef CONFIG_COMPAT
1884 	.compat_get	= compat_do_ipt_get_ctl,
1885 #endif
1886 	.owner		= THIS_MODULE,
1887 };
1888 
1889 static struct xt_match ipt_builtin_mt[] __read_mostly = {
1890 	{
1891 		.name       = "icmp",
1892 		.match      = icmp_match,
1893 		.matchsize  = sizeof(struct ipt_icmp),
1894 		.checkentry = icmp_checkentry,
1895 		.proto      = IPPROTO_ICMP,
1896 		.family     = NFPROTO_IPV4,
1897 		.me	    = THIS_MODULE,
1898 	},
1899 };
1900 
1901 static int __net_init ip_tables_net_init(struct net *net)
1902 {
1903 	return xt_proto_init(net, NFPROTO_IPV4);
1904 }
1905 
1906 static void __net_exit ip_tables_net_exit(struct net *net)
1907 {
1908 	xt_proto_fini(net, NFPROTO_IPV4);
1909 }
1910 
1911 static struct pernet_operations ip_tables_net_ops = {
1912 	.init = ip_tables_net_init,
1913 	.exit = ip_tables_net_exit,
1914 };
1915 
1916 static int __init ip_tables_init(void)
1917 {
1918 	int ret;
1919 
1920 	ret = register_pernet_subsys(&ip_tables_net_ops);
1921 	if (ret < 0)
1922 		goto err1;
1923 
1924 	/* No one else will be downing sem now, so we won't sleep */
1925 	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
1926 	if (ret < 0)
1927 		goto err2;
1928 	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
1929 	if (ret < 0)
1930 		goto err4;
1931 
1932 	/* Register setsockopt */
1933 	ret = nf_register_sockopt(&ipt_sockopts);
1934 	if (ret < 0)
1935 		goto err5;
1936 
1937 	pr_info("(C) 2000-2006 Netfilter Core Team\n");
1938 	return 0;
1939 
1940 err5:
1941 	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
1942 err4:
1943 	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
1944 err2:
1945 	unregister_pernet_subsys(&ip_tables_net_ops);
1946 err1:
1947 	return ret;
1948 }
1949 
1950 static void __exit ip_tables_fini(void)
1951 {
1952 	nf_unregister_sockopt(&ipt_sockopts);
1953 
1954 	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
1955 	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
1956 	unregister_pernet_subsys(&ip_tables_net_ops);
1957 }
1958 
1959 EXPORT_SYMBOL(ipt_register_table);
1960 EXPORT_SYMBOL(ipt_unregister_table);
1961 EXPORT_SYMBOL(ipt_do_table);
1962 module_init(ip_tables_init);
1963 module_exit(ip_tables_fini);
1964