/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 * Copyright (C) 2006-2012 Patrick McHardy <kaber@trash.net>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define XT_PCPU_BLOCK_SIZE 4096

struct compat_delta {
	unsigned int offset; /* offset in kernel */
	int delta; /* delta in 32bit user land */
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_tab;
	unsigned int number; /* number of slots in compat_tab[] */
	unsigned int cur; /* number of used slots in compat_tab[] */
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Registration hooks for targets. */
int xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);
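
/*
 * Usage sketch (hypothetical extension; the "foo"/FOO names are
 * illustrative and not taken from any in-tree module).  An extension
 * fills in a struct xt_target and registers it from its module init:
 *
 *	static struct xt_target foo_tg_reg __read_mostly = {
 *		.name       = "FOO",
 *		.revision   = 0,
 *		.family     = NFPROTO_UNSPEC,
 *		.target     = foo_tg,
 *		.targetsize = sizeof(struct xt_foo_tginfo),
 *		.me         = THIS_MODULE,
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_target(&foo_tg_reg);
 *	}
 *
 *	static void __exit foo_tg_exit(void)
 *	{
 *		xt_unregister_target(&foo_tg_reg);
 *	}
 */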

int xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);
	return 0;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are weird, but module loading must not be done with the mutex
 * held (since the loaded extensions will register themselves), and we
 * have to have a single function to use.
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	match = xt_find_match(nfproto, name, revision);
	if (IS_ERR(match)) {
		request_module("%st_%s", xt_prefix[nfproto], name);
		match = xt_find_match(nfproto, name, revision);
	}

	return match;
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
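
/*
 * Note on autoloading: the modprobe name is xt_prefix[] + "t_" + the
 * extension name, so looking up match "state" with nfproto ==
 * NFPROTO_IPV4 requests module "ipt_state".  Extension modules commonly
 * declare per-family aliases, e.g. MODULE_ALIAS("ipt_state"), so a
 * single xt_* module can satisfy requests from several families.
 */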

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = -ENOENT;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
		return ERR_PTR(-EINVAL);

	target = xt_find_target(af, name, revision);
	if (IS_ERR(target)) {
		request_module("%st_%s", xt_prefix[af], name);
		target = xt_find_target(af, name, revision);
	}

	return target;
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	mutex_lock(&xt[af].mutex);
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
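
/*
 * Worked example (sketch): if revisions 0 and 2 of a match are
 * registered and userspace probes revision 1, match_revfn() records
 * best == 2 but have_rev == 0, so this returns 1 with *err ==
 * -EPROTONOSUPPORT.  If no revision of the name exists at all, it
 * returns 0 with *err == -ENOENT, telling the caller to try loading
 * the extension module first.
 */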

static char *
textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
{
	static const char *const inetbr_names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	static const char *const arp_names[] = {
		"INPUT", "FORWARD", "OUTPUT",
	};
	const char *const *names;
	unsigned int i, max;
	char *p = buf;
	bool np = false;
	int res;

	names = (nfproto == NFPROTO_ARP) ? arp_names : inetbr_names;
	max   = (nfproto == NFPROTO_ARP) ? ARRAY_SIZE(arp_names) :
	                                   ARRAY_SIZE(inetbr_names);
	*p = '\0';
	for (i = 0; i < max; ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

/**
 * xt_check_proc_name - check that name is suitable for /proc file creation
 *
 * @name: file name candidate
 * @size: length of buffer
 *
 * Some x_tables modules wish to create a file in /proc.
 * This function makes sure that the name is suitable for this
 * purpose: it checks that the name is NUL terminated and isn't a
 * 'special' name, like "..".
 *
 * Return: negative errno on error, 0 if the name is usable.
 */
int xt_check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;

	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;

	if (strcmp(name, ".") == 0 ||
	    strcmp(name, "..") == 0 ||
	    strchr(name, '/'))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(xt_check_proc_name);
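
/*
 * Usage sketch: the callers are extensions that create /proc entries
 * from user-controlled names (xt_hashlimit and xt_recent), e.g.:
 *
 *	ret = xt_check_proc_name(info->name, sizeof(info->name));
 *	if (ret)
 *		return ret;
 */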

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->match->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

/**
 * xt_check_entry_match - check that matches end before start of target
 *
 * @match: beginning of xt_entry_match
 * @target: beginning of this rule's target (alleged end of matches)
 * @alignment: alignment requirement of match structures
 *
 * Validates that all matches add up to the beginning of the target,
 * and that each match covers at least the base structure size.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int xt_check_entry_match(const char *match, const char *target,
				const size_t alignment)
{
	const struct xt_entry_match *pos;
	int length = target - match;

	if (length == 0) /* no matches */
		return 0;

	pos = (struct xt_entry_match *)match;
	do {
		if ((unsigned long)pos % alignment)
			return -EINVAL;

		if (length < (int)sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size < sizeof(struct xt_entry_match))
			return -EINVAL;

		if (pos->u.match_size > length)
			return -EINVAL;

		length -= pos->u.match_size;
		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
	} while (length > 0);

	return 0;
}

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
{
	struct xt_af *xp = &xt[af];

	if (!xp->compat_tab) {
		if (!xp->number)
			return -EINVAL;
		xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
		if (!xp->compat_tab)
			return -ENOMEM;
		xp->cur = 0;
	}

	if (xp->cur >= xp->number)
		return -EINVAL;

	if (xp->cur)
		delta += xp->compat_tab[xp->cur - 1].delta;
	xp->compat_tab[xp->cur].offset = offset;
	xp->compat_tab[xp->cur].delta = delta;
	xp->cur++;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	if (xt[af].compat_tab) {
		vfree(xt[af].compat_tab);
		xt[af].compat_tab = NULL;
		xt[af].number = 0;
		xt[af].cur = 0;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp = xt[af].compat_tab;
	int mid, left = 0, right = xt[af].cur - 1;

	while (left <= right) {
		mid = (left + right) >> 1;
		if (offset > tmp[mid].offset)
			left = mid + 1;
		else if (offset < tmp[mid].offset)
			right = mid - 1;
		else
			return mid ? tmp[mid - 1].delta : 0;
	}
	return left ? tmp[left - 1].delta : 0;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
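
/*
 * Worked example (sketch): with cumulative deltas recorded as
 * { {.offset = 0, .delta = 4}, {.offset = 112, .delta = 12} },
 * xt_compat_calc_jump(af, 112) returns 4 (the delta accumulated before
 * the entry at offset 112), while a jump past both entries, e.g. to
 * offset 200, falls off the binary search with left == 2 and returns
 * the full accumulated delta, 12.
 */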

void xt_compat_init_offsets(u_int8_t af, unsigned int number)
{
	xt[af].number = number;
	xt[af].cur = 0;
}
EXPORT_SYMBOL(xt_compat_init_offsets);

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			       unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;
	char name[sizeof(m->u.user.name)];

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;
	strlcpy(name, match->name, sizeof(name));
	module_put(match->me);
	strncpy(m->u.user.name, name, sizeof(m->u.user.name));

	*size += off;
	*dstptr += msize;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);

/* non-compat version may have padding after verdict */
struct compat_xt_standard_target {
	struct compat_xt_entry_target t;
	compat_uint_t verdict;
};

int xt_compat_check_entry_offsets(const void *base, const char *elems,
				  unsigned int target_offset,
				  unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct compat_xt_entry_target *t;
	const char *e = base;

	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
		return -EINVAL;

	/* compat_xt_entry match has less strict alignment requirements,
	 * otherwise they are identical.  In case of padding differences
	 * we need to add compat version of xt_check_entry_match.
	 */
	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct compat_xt_entry_match));
}
EXPORT_SYMBOL(xt_compat_check_entry_offsets);
#endif /* CONFIG_COMPAT */

/**
 * xt_check_entry_offsets - validate arp/ip/ip6t_entry
 *
 * @base: pointer to arp/ip/ip6t_entry
 * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
 * @target_offset: the arp/ip/ip6_t->target_offset
 * @next_offset: the arp/ip/ip6_t->next_offset
 *
 * Validates that target_offset and next_offset are sane and that all
 * match sizes (if any) align with the target offset.
 *
 * This function does not validate the targets or matches themselves, it
 * only tests that all the offsets and sizes are correct, that all
 * match structures are aligned, and that the last structure ends where
 * the target structure begins.
 *
 * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
 *
 * The arp/ip/ip6t_entry structure @base must have passed the following
 * tests:
 * - it must point to a valid memory location
 * - base to base + next_offset must be accessible, i.e. not exceed
 *   allocated length.
 *
 * A well-formed entry looks like this:
 *
 * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
 * e->elems[]-----'                              |               |
 *                matchsize                      |               |
 *                                matchsize      |               |
 *                                               |               |
 * target_offset---------------------------------'               |
 * next_offset---------------------------------------------------'
 *
 * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
 *          This is where matches (if any) and the target reside.
 * target_offset: beginning of target.
 * next_offset: start of the next rule; also: size of this rule.
 * Since targets have a minimum size, target_offset + minlen <= next_offset.
 *
 * Every match stores its size; the sum of sizes must not exceed
 * target_offset.
 *
 * Return: 0 on success, negative errno on failure.
 */
int xt_check_entry_offsets(const void *base,
			   const char *elems,
			   unsigned int target_offset,
			   unsigned int next_offset)
{
	long size_of_base_struct = elems - (const char *)base;
	const struct xt_entry_target *t;
	const char *e = base;

	/* target start is within the ip/ip6/arpt_entry struct */
	if (target_offset < size_of_base_struct)
		return -EINVAL;

	if (target_offset + sizeof(*t) > next_offset)
		return -EINVAL;

	t = (void *)(e + target_offset);
	if (t->u.target_size < sizeof(*t))
		return -EINVAL;

	if (target_offset + t->u.target_size > next_offset)
		return -EINVAL;

	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
		return -EINVAL;

	return xt_check_entry_match(elems, base + target_offset,
				    __alignof__(struct xt_entry_match));
}
EXPORT_SYMBOL(xt_check_entry_offsets);
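
/*
 * Worked example (sketch; sizes illustrative, not exact struct sizes):
 * with a 112-byte base structure, one match of u.match_size == 48 and
 * target_offset == 160, the match region [112, 160) is consumed exactly
 * and validation proceeds to the target checks.  If that match instead
 * claimed u.match_size == 56, xt_check_entry_match() would see the
 * match overrun target_offset and return -EINVAL.
 */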

/**
 * xt_alloc_entry_offsets - allocate array to store rule head offsets
 *
 * @size: number of entries
 *
 * Return: NULL or kmalloc'd or vmalloc'd array
 */
unsigned int *xt_alloc_entry_offsets(unsigned int size)
{
	unsigned int *off;

	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);

	if (off)
		return off;

	if (size < (SIZE_MAX / sizeof(unsigned int)))
		off = vmalloc(size * sizeof(unsigned int));

	return off;
}
EXPORT_SYMBOL(xt_alloc_entry_offsets);

/**
 * xt_find_jump_offset - check if target is a valid jump offset
 *
 * @offsets: array containing all valid rule start offsets of a rule blob
 * @target: the jump target to search for
 * @size: number of entries in @offsets
 */
bool xt_find_jump_offset(const unsigned int *offsets,
			 unsigned int target, unsigned int size)
{
	int m, low = 0, hi = size;

	while (hi > low) {
		m = (low + hi) / 2u;

		if (offsets[m] > target)
			hi = m;
		else if (offsets[m] < target)
			low = m + 1;
		else
			return true;
	}

	return false;
}
EXPORT_SYMBOL(xt_find_jump_offset);
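
/*
 * Example (sketch): with offsets[] = { 0, 112, 304, 512 } collected
 * from the rule heads of a blob, a verdict jumping to 304 is accepted,
 * while a jump to 300 (into the middle of a rule) fails this binary
 * search and the ruleset is rejected by the caller.
 */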

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask,
		                     par->family),
		       textify_hooks(allow, sizeof(allow), par->target->hooks,
		                     par->family));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

/**
 * xt_copy_counters_from_user - copy counters and metadata from userspace
 *
 * @user: src pointer to userspace memory
 * @len: alleged size of userspace memory
 * @info: where to store the xt_counters_info metadata
 * @compat: true if the setsockopt call is done by a 32bit task on a
 *	64bit kernel
 *
 * Copies counter metadata from @user and stores it in @info.
 *
 * vmallocs memory to hold the counters, then copies the counter data
 * from @user to the new memory and returns a pointer to it.
 *
 * If @compat is true, @info gets converted automatically to the 64bit
 * representation.
 *
 * The metadata associated with the counters is stored in @info.
 *
 * Return: returns pointer that caller has to test via IS_ERR().
 * If IS_ERR is false, caller has to vfree the pointer.
 */
void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat)
{
	void *mem;
	u64 size;

#ifdef CONFIG_COMPAT
	if (compat) {
		/* structures only differ in size due to alignment */
		struct compat_xt_counters_info compat_tmp;

		if (len <= sizeof(compat_tmp))
			return ERR_PTR(-EINVAL);

		len -= sizeof(compat_tmp);
		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
			return ERR_PTR(-EFAULT);

		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
		info->num_counters = compat_tmp.num_counters;
		user += sizeof(compat_tmp);
	} else
#endif
	{
		if (len <= sizeof(*info))
			return ERR_PTR(-EINVAL);

		len -= sizeof(*info);
		if (copy_from_user(info, user, sizeof(*info)) != 0)
			return ERR_PTR(-EFAULT);

		info->name[sizeof(info->name) - 1] = '\0';
		user += sizeof(*info);
	}

	size = sizeof(struct xt_counters);
	size *= info->num_counters;

	if (size != (u64)len)
		return ERR_PTR(-EINVAL);

	mem = vmalloc(len);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, user, len) == 0)
		return mem;

	vfree(mem);
	return ERR_PTR(-EFAULT);
}
EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
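
/*
 * Caller pattern (sketch, mirroring the do_add_counters() paths in the
 * per-family table code):
 *
 *	struct xt_counters_info tmp;
 *	void *paddc;
 *
 *	paddc = xt_copy_counters_from_user(arg, len, &tmp, compat);
 *	if (IS_ERR(paddc))
 *		return PTR_ERR(paddc);
 *	...
 *	vfree(paddc);
 */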

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;
	char name[sizeof(t->u.user.name)];

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;
	strlcpy(name, target->name, sizeof(name));
	module_put(target->me);
	strncpy(t->u.user.name, name, sizeof(t->u.user.name));

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *info = NULL;
	size_t sz = sizeof(*info) + size;

	if (sz < sizeof(*info))
		return NULL;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	if (sz <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		info = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!info) {
		info = vmalloc(sz);
		if (!info)
			return NULL;
	}
	memset(info, 0, sizeof(*info));
	info->size = size;
	return info;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	if (info->jumpstack != NULL) {
		for_each_possible_cpu(cpu)
			kvfree(info->jumpstack[cpu]);
		kvfree(info->jumpstack);
	}

	kvfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns NULL on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t, *found = NULL;

	mutex_lock(&xt[af].mutex);
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;

	if (net == &init_net)
		goto out;

	/* Table doesn't exist in this netns, re-try init */
	list_for_each_entry(t, &init_net.xt.tables[af], list) {
		if (strcmp(t->name, name))
			continue;
		if (!try_module_get(t->me)) {
			mutex_unlock(&xt[af].mutex);
			return NULL;
		}

		mutex_unlock(&xt[af].mutex);
		if (t->table_init(net) != 0) {
			module_put(t->me);
			return NULL;
		}

		found = t;

		mutex_lock(&xt[af].mutex);
		break;
	}

	if (!found)
		goto out;

	/* and once again: */
	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0)
			return t;

	module_put(found->me);
 out:
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(seqcount_t, xt_recseq);
EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);

struct static_key xt_tee_enabled __read_mostly;
EXPORT_SYMBOL_GPL(xt_tee_enabled);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vzalloc(size);
	else
		i->jumpstack = kzalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;

	/* ruleset without jumps -- no stack needed */
	if (i->stacksize == 0)
		return 0;

	/* Jumpstack needs to be able to record two full callchains, one
	 * from the first rule set traversal, plus one table reentrancy
	 * via -j TEE without clobbering the callchain that brought us to
	 * TEE target.
	 *
	 * This is done by allocating two jumpstacks per cpu, on reentry
	 * the upper half of the stack is used.
	 *
	 * see the jumpstack setup in ipt_do_table() for more details.
	 */
	size = sizeof(void *) * i->stacksize * 2u;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	newinfo->initial_entries = private->initial_entries;
	/*
	 * Ensure contents of newinfo are visible before assigning to
	 * private.
	 */
	smp_wmb();
	table->private = newinfo;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

#ifdef CONFIG_AUDIT
	if (audit_enabled) {
		struct audit_buffer *ab;

		ab = audit_log_start(current->audit_context, GFP_KERNEL,
				     AUDIT_NETFILTER_CFG);
		if (ab) {
			audit_log_format(ab, "table=%s family=%u entries=%u",
					 table->name, table->af,
					 private->number);
			audit_log_end(ab);
		}
	}
#endif

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
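
/*
 * Caller pattern (sketch, mirroring __do_replace() in the per-family
 * table code): the returned old private area must still be read for
 * its final counter values before it can be freed.
 *
 *	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto err;
 *	get_counters(oldinfo, counters);
 *	xt_free_table_info(oldinfo);
 */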

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&xt[table->af].mutex);
	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (*table->name)
		seq_printf(seq, "%s\n", table->name);
	return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE_DATA(inode);
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		if (*match->name)
			seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		if (*target->name)
			seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct nf_mttg_trav *trav;
	trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
	if (!trav)
		return -ENOMEM;

	trav->nfproto = (unsigned long)PDE_DATA(inode);
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_ops_alloc - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will create the nf_hook_ops that the x_table needs
 * to hand to xt_hook_link_net().
 */
struct nf_hook_ops *
xt_hook_ops_alloc(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;

	if (!num_hooks)
		return ERR_PTR(-EINVAL);

	ops = kcalloc(num_hooks, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_ops_alloc);
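
/*
 * Usage sketch (names as in the iptable_filter module; the exact
 * registration flow varies by caller):
 *
 *	ops = xt_hook_ops_alloc(&packet_filter, iptable_filter_hook);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
 */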

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
	kuid_t root_uid;
	kgid_t root_gid;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;


#ifdef CONFIG_PROC_FS
	root_uid = make_kuid(net->user_ns, 0);
	root_gid = make_kgid(net->user_ns, 0);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
	if (uid_valid(root_uid) && gid_valid(root_gid))
		proc_set_user(proc, root_uid, root_gid);
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	remove_proc_entry(buf, net->proc_net);
#endif /*CONFIG_PROC_FS*/
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

/**
 * xt_percpu_counter_alloc - allocate x_tables rule counter
 *
 * @state: pointer to xt_percpu allocation state
 * @counter: pointer to counter struct inside the ip(6)/arpt_entry struct
 *
 * On SMP, the packet counter [ ip(6)t_entry->counters.pcnt ] will then
 * contain the address of the real (percpu) counter.
 *
 * Rule evaluation needs to use xt_get_this_cpu_counter() helper
 * to fetch the real percpu counter.
 *
 * To speed up allocation and improve data locality, a 4kb block is
 * allocated.
 *
 * xt_percpu_counter_alloc_state contains the base address of the
 * allocated page and the current sub-offset.
 *
 * returns false on error.
 */
bool xt_percpu_counter_alloc(struct xt_percpu_counter_alloc_state *state,
			     struct xt_counters *counter)
{
	BUILD_BUG_ON(XT_PCPU_BLOCK_SIZE < (sizeof(*counter) * 2));

	if (nr_cpu_ids <= 1)
		return true;

	if (!state->mem) {
		state->mem = __alloc_percpu(XT_PCPU_BLOCK_SIZE,
					    XT_PCPU_BLOCK_SIZE);
		if (!state->mem)
			return false;
	}
	counter->pcnt = (__force unsigned long)(state->mem + state->off);
	state->off += sizeof(*counter);
	if (state->off > (XT_PCPU_BLOCK_SIZE - sizeof(*counter))) {
		state->mem = NULL;
		state->off = 0;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_alloc);
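
/*
 * Arithmetic note: sizeof(struct xt_counters) is 16 bytes (two u64s),
 * so one XT_PCPU_BLOCK_SIZE (4096-byte) percpu block backs up to 256
 * rule counters.  Only the counter whose pcnt address is block-aligned
 * "owns" the allocation, which is why xt_percpu_counter_free() below
 * only calls free_percpu() when (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0.
 */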

void xt_percpu_counter_free(struct xt_counters *counters)
{
	unsigned long pcnt = counters->pcnt;

	if (nr_cpu_ids > 1 && (pcnt & (XT_PCPU_BLOCK_SIZE - 1)) == 0)
		free_percpu((void __percpu *)pcnt);
}
EXPORT_SYMBOL_GPL(xt_percpu_counter_free);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		seqcount_init(&per_cpu(xt_recseq, i));
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_tab = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);