1 /*
2 * Packet matching code.
3 *
4 * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5 * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6 * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15 #include <linux/kernel.h>
16 #include <linux/capability.h>
17 #include <linux/in.h>
18 #include <linux/skbuff.h>
19 #include <linux/kmod.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netdevice.h>
22 #include <linux/module.h>
23 #include <linux/poison.h>
24 #include <linux/icmpv6.h>
25 #include <net/ipv6.h>
26 #include <net/compat.h>
27 #include <asm/uaccess.h>
28 #include <linux/mutex.h>
29 #include <linux/proc_fs.h>
30 #include <linux/err.h>
31 #include <linux/cpumask.h>
32
33 #include <linux/netfilter_ipv6/ip6_tables.h>
34 #include <linux/netfilter/x_tables.h>
35 #include <net/netfilter/nf_log.h>
36 #include "../../netfilter/xt_repldata.h"
37
38 MODULE_LICENSE("GPL");
39 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
40 MODULE_DESCRIPTION("IPv6 packet filter");
41
42 #ifdef CONFIG_NETFILTER_DEBUG
43 #define IP_NF_ASSERT(x) WARN_ON(!(x))
44 #else
45 #define IP_NF_ASSERT(x)
46 #endif
47
48 void *ip6t_alloc_initial_table(const struct xt_table *info)
49 {
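	/* Delegates to the xt_repldata.h helpers, which build the initial
	 * replace blob: a header, one unconditional policy entry per valid
	 * hook and a terminating ERROR entry.
	 */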
50 return xt_alloc_initial_table(ip6t, IP6T);
51 }
52 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
53
54 /*
55 We keep a set of rules for each CPU, so we can avoid write-locking
56 them in the softirq when updating the counters and therefore
57 only need to read-lock in the softirq; doing a write_lock_bh() in user
58 context stops packets coming through and allows user context to read
59 the counters or update the rules.
60
61 Hence the start of any table is given by get_table() below. */
62
63 /* Returns whether matches rule or not. */
64 /* Performance critical - called for every packet */
65 static inline bool
66 ip6_packet_match(const struct sk_buff *skb,
67 const char *indev,
68 const char *outdev,
69 const struct ip6t_ip6 *ip6info,
70 unsigned int *protoff,
71 int *fragoff, bool *hotdrop)
72 {
73 unsigned long ret;
74 const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
75
76 if (NF_INVF(ip6info, IP6T_INV_SRCIP,
77 ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
78 &ip6info->src)) ||
79 NF_INVF(ip6info, IP6T_INV_DSTIP,
80 ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
81 &ip6info->dst)))
82 return false;
83
84 ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
85
86 if (NF_INVF(ip6info, IP6T_INV_VIA_IN, ret != 0))
87 return false;
88
89 ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
90
91 if (NF_INVF(ip6info, IP6T_INV_VIA_OUT, ret != 0))
92 return false;
93
94 /* ... might want to do something with class and flowlabel here ... */
95
96 /* look for the desired protocol header */
97 if (ip6info->flags & IP6T_F_PROTO) {
98 int protohdr;
99 unsigned short _frag_off;
100
101 protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL);
102 if (protohdr < 0) {
103 if (_frag_off == 0)
104 *hotdrop = true;
105 return false;
106 }
107 *fragoff = _frag_off;
108
109 if (ip6info->proto == protohdr) {
110 if (ip6info->invflags & IP6T_INV_PROTO)
111 return false;
112
113 return true;
114 }
115
116 /* We need to match '-p all', too! */
117 if ((ip6info->proto != 0) &&
118 !(ip6info->invflags & IP6T_INV_PROTO))
119 return false;
120 }
121 return true;
122 }
123
124 /* should be ip6 safe */
125 static bool
126 ip6_checkentry(const struct ip6t_ip6 *ipv6)
127 {
128 if (ipv6->flags & ~IP6T_F_MASK)
129 return false;
130 if (ipv6->invflags & ~IP6T_INV_MASK)
131 return false;
132
133 return true;
134 }
135
136 static unsigned int
137 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
138 {
139 net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
140
141 return NF_DROP;
142 }
143
144 static inline struct ip6t_entry *
145 get_entry(const void *base, unsigned int offset)
146 {
147 return (struct ip6t_entry *)(base + offset);
148 }
149
150 /* All zeroes == unconditional rule. */
151 /* Mildly perf critical (only if packet tracing is on) */
152 static inline bool unconditional(const struct ip6t_entry *e)
153 {
154 static const struct ip6t_ip6 uncond;
155
156 return e->target_offset == sizeof(struct ip6t_entry) &&
157 memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
158 }
159
160 static inline const struct xt_entry_target *
161 ip6t_get_target_c(const struct ip6t_entry *e)
162 {
163 return ip6t_get_target((struct ip6t_entry *)e);
164 }
165
166 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
167 /* This cries for unification! */
168 static const char *const hooknames[] = {
169 [NF_INET_PRE_ROUTING] = "PREROUTING",
170 [NF_INET_LOCAL_IN] = "INPUT",
171 [NF_INET_FORWARD] = "FORWARD",
172 [NF_INET_LOCAL_OUT] = "OUTPUT",
173 [NF_INET_POST_ROUTING] = "POSTROUTING",
174 };
175
176 enum nf_ip_trace_comments {
177 NF_IP6_TRACE_COMMENT_RULE,
178 NF_IP6_TRACE_COMMENT_RETURN,
179 NF_IP6_TRACE_COMMENT_POLICY,
180 };
181
182 static const char *const comments[] = {
183 [NF_IP6_TRACE_COMMENT_RULE] = "rule",
184 [NF_IP6_TRACE_COMMENT_RETURN] = "return",
185 [NF_IP6_TRACE_COMMENT_POLICY] = "policy",
186 };
187
188 static struct nf_loginfo trace_loginfo = {
189 .type = NF_LOG_TYPE_LOG,
190 .u = {
191 .log = {
192 .level = LOGLEVEL_WARNING,
193 .logflags = NF_LOG_DEFAULT_MASK,
194 },
195 },
196 };
197
198 /* Mildly perf critical (only if packet tracing is on) */
199 static inline int
200 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
201 const char *hookname, const char **chainname,
202 const char **comment, unsigned int *rulenum)
203 {
204 const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
205
206 if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
207 /* Head of user chain: ERROR target with chainname */
208 *chainname = t->target.data;
209 (*rulenum) = 0;
210 } else if (s == e) {
211 (*rulenum)++;
212
213 if (unconditional(s) &&
214 strcmp(t->target.u.kernel.target->name,
215 XT_STANDARD_TARGET) == 0 &&
216 t->verdict < 0) {
217 /* Tail of chains: STANDARD target (return/policy) */
218 *comment = *chainname == hookname
219 ? comments[NF_IP6_TRACE_COMMENT_POLICY]
220 : comments[NF_IP6_TRACE_COMMENT_RETURN];
221 }
222 return 1;
223 } else
224 (*rulenum)++;
225
226 return 0;
227 }
228
229 static void trace_packet(struct net *net,
230 const struct sk_buff *skb,
231 unsigned int hook,
232 const struct net_device *in,
233 const struct net_device *out,
234 const char *tablename,
235 const struct xt_table_info *private,
236 const struct ip6t_entry *e)
237 {
238 const struct ip6t_entry *root;
239 const char *hookname, *chainname, *comment;
240 const struct ip6t_entry *iter;
241 unsigned int rulenum = 0;
242
243 root = get_entry(private->entries, private->hook_entry[hook]);
244
245 hookname = chainname = hooknames[hook];
246 comment = comments[NF_IP6_TRACE_COMMENT_RULE];
247
248 xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
249 if (get_chainname_rulenum(iter, e, hookname,
250 &chainname, &comment, &rulenum) != 0)
251 break;
252
253 nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
254 "TRACE: %s:%s:%s:%u ",
255 tablename, chainname, comment, rulenum);
256 }
257 #endif
258
259 static inline struct ip6t_entry *
260 ip6t_next_entry(const struct ip6t_entry *entry)
261 {
262 return (void *)entry + entry->next_offset;
263 }
264
265 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
266 unsigned int
267 ip6t_do_table(struct sk_buff *skb,
268 const struct nf_hook_state *state,
269 struct xt_table *table)
270 {
271 unsigned int hook = state->hook;
272 static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
273 /* Initializing verdict to NF_DROP keeps gcc happy. */
274 unsigned int verdict = NF_DROP;
275 const char *indev, *outdev;
276 const void *table_base;
277 struct ip6t_entry *e, **jumpstack;
278 unsigned int stackidx, cpu;
279 const struct xt_table_info *private;
280 struct xt_action_param acpar;
281 unsigned int addend;
282
283 /* Initialization */
284 stackidx = 0;
285 indev = state->in ? state->in->name : nulldevname;
286 outdev = state->out ? state->out->name : nulldevname;
287 /* We handle fragments by dealing with the first fragment as
288 * if it was a normal packet. All other fragments are treated
289 * normally, except that they will NEVER match rules that ask
290 * things we don't know, i.e. tcp syn flag or ports. If the
291 * rule is also a fragment-specific rule, non-fragments won't
292 * match it. */
293 acpar.hotdrop = false;
294 acpar.net = state->net;
295 acpar.in = state->in;
296 acpar.out = state->out;
297 acpar.family = NFPROTO_IPV6;
298 acpar.hooknum = hook;
299
300 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
301
302 local_bh_disable();
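	/* Per-CPU write sequence counter: lets get_counters() detect and
	 * retry around concurrent counter updates instead of locking here.
	 */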
303 addend = xt_write_recseq_begin();
304 private = table->private;
305 /*
306 * Ensure we load private-> members after we've fetched the base
307 * pointer.
308 */
309 smp_read_barrier_depends();
310 cpu = smp_processor_id();
311 table_base = private->entries;
312 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
313
314 /* Switch to alternate jumpstack if we're being invoked via TEE.
315 * TEE issues XT_CONTINUE verdict on original skb so we must not
316 * clobber the jumpstack.
317 *
318 * For recursion via REJECT or SYNPROXY the stack will be clobbered
319 * but it is no problem since absolute verdict is issued by these.
320 */
321 if (static_key_false(&xt_tee_enabled))
322 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
323
324 e = get_entry(table_base, private->hook_entry[hook]);
325
326 do {
327 const struct xt_entry_target *t;
328 const struct xt_entry_match *ematch;
329 struct xt_counters *counter;
330
331 IP_NF_ASSERT(e);
332 acpar.thoff = 0;
333 if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
334 &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
335 no_match:
336 e = ip6t_next_entry(e);
337 continue;
338 }
339
340 xt_ematch_foreach(ematch, e) {
341 acpar.match = ematch->u.kernel.match;
342 acpar.matchinfo = ematch->data;
343 if (!acpar.match->match(skb, &acpar))
344 goto no_match;
345 }
346
347 counter = xt_get_this_cpu_counter(&e->counters);
348 ADD_COUNTER(*counter, skb->len, 1);
349
350 t = ip6t_get_target_c(e);
351 IP_NF_ASSERT(t->u.kernel.target);
352
353 #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
354 /* The packet is traced: log it */
355 if (unlikely(skb->nf_trace))
356 trace_packet(state->net, skb, hook, state->in,
357 state->out, table->name, private, e);
358 #endif
359 /* Standard target? */
360 if (!t->u.kernel.target->target) {
361 int v;
362
363 v = ((struct xt_standard_target *)t)->verdict;
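			/* Negative verdicts are absolute and encoded as -(NF_*) - 1;
			 * XT_RETURN is the exception and pops the jump stack instead.
			 */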
364 if (v < 0) {
365 /* Pop from stack? */
366 if (v != XT_RETURN) {
367 verdict = (unsigned int)(-v) - 1;
368 break;
369 }
370 if (stackidx == 0)
371 e = get_entry(table_base,
372 private->underflow[hook]);
373 else
374 e = ip6t_next_entry(jumpstack[--stackidx]);
375 continue;
376 }
377 if (table_base + v != ip6t_next_entry(e) &&
378 !(e->ipv6.flags & IP6T_F_GOTO)) {
379 if (unlikely(stackidx >= private->stacksize)) {
380 verdict = NF_DROP;
381 break;
382 }
383 jumpstack[stackidx++] = e;
384 }
385
386 e = get_entry(table_base, v);
387 continue;
388 }
389
390 acpar.target = t->u.kernel.target;
391 acpar.targinfo = t->data;
392
393 verdict = t->u.kernel.target->target(skb, &acpar);
394 if (verdict == XT_CONTINUE)
395 e = ip6t_next_entry(e);
396 else
397 /* Verdict */
398 break;
399 } while (!acpar.hotdrop);
400
401 xt_write_recseq_end(addend);
402 local_bh_enable();
403
404 if (acpar.hotdrop)
405 return NF_DROP;
406 else return verdict;
407 }
408
409 /* Figures out from what hook each rule can be called: returns 0 if
410 there are loops. Puts hook bitmask in comefrom. */
411 static int
412 mark_source_chains(const struct xt_table_info *newinfo,
413 unsigned int valid_hooks, void *entry0,
414 unsigned int *offsets)
415 {
416 unsigned int hook;
417
418 /* No recursion; use packet counter to save back ptrs (reset
419 to 0 as we leave), and comefrom to save source hook bitmask */
420 for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
421 unsigned int pos = newinfo->hook_entry[hook];
422 struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
423
424 if (!(valid_hooks & (1 << hook)))
425 continue;
426
427 /* Set initial back pointer. */
428 e->counters.pcnt = pos;
429
430 for (;;) {
431 const struct xt_standard_target *t
432 = (void *)ip6t_get_target_c(e);
433 int visited = e->comefrom & (1 << hook);
434
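			/* Bit NF_INET_NUMHOOKS marks entries on the chain walk
			 * currently in progress; meeting it again means we have
			 * looped back, so bail out.
			 */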
435 if (e->comefrom & (1 << NF_INET_NUMHOOKS))
436 return 0;
437
438 e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
439
440 /* Unconditional return/END. */
441 if ((unconditional(e) &&
442 (strcmp(t->target.u.user.name,
443 XT_STANDARD_TARGET) == 0) &&
444 t->verdict < 0) || visited) {
445 unsigned int oldpos, size;
446
447 if ((strcmp(t->target.u.user.name,
448 XT_STANDARD_TARGET) == 0) &&
449 t->verdict < -NF_MAX_VERDICT - 1)
450 return 0;
451
452 /* Return: backtrack through the last
453 big jump. */
454 do {
455 e->comefrom ^= (1<<NF_INET_NUMHOOKS);
456 oldpos = pos;
457 pos = e->counters.pcnt;
458 e->counters.pcnt = 0;
459
460 /* We're at the start. */
461 if (pos == oldpos)
462 goto next;
463
464 e = (struct ip6t_entry *)
465 (entry0 + pos);
466 } while (oldpos == pos + e->next_offset);
467
468 /* Move along one */
469 size = e->next_offset;
470 e = (struct ip6t_entry *)
471 (entry0 + pos + size);
472 if (pos + size >= newinfo->size)
473 return 0;
474 e->counters.pcnt = pos;
475 pos += size;
476 } else {
477 int newpos = t->verdict;
478
479 if (strcmp(t->target.u.user.name,
480 XT_STANDARD_TARGET) == 0 &&
481 newpos >= 0) {
482 /* This a jump; chase it. */
483 if (!xt_find_jump_offset(offsets, newpos,
484 newinfo->number))
485 return 0;
486 e = (struct ip6t_entry *)
487 (entry0 + newpos);
488 } else {
489 /* ... this is a fallthru */
490 newpos = pos + e->next_offset;
491 if (newpos >= newinfo->size)
492 return 0;
493 }
494 e = (struct ip6t_entry *)
495 (entry0 + newpos);
496 e->counters.pcnt = pos;
497 pos = newpos;
498 }
499 }
500 next: ;
501 }
502 return 1;
503 }
504
505 static void cleanup_match(struct xt_entry_match *m, struct net *net)
506 {
507 struct xt_mtdtor_param par;
508
509 par.net = net;
510 par.match = m->u.kernel.match;
511 par.matchinfo = m->data;
512 par.family = NFPROTO_IPV6;
513 if (par.match->destroy != NULL)
514 par.match->destroy(&par);
515 module_put(par.match->me);
516 }
517
518 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
519 {
520 const struct ip6t_ip6 *ipv6 = par->entryinfo;
521
522 par->match = m->u.kernel.match;
523 par->matchinfo = m->data;
524
525 return xt_check_match(par, m->u.match_size - sizeof(*m),
526 ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
527 }
528
529 static int
530 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
531 {
532 struct xt_match *match;
533 int ret;
534
535 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
536 m->u.user.revision);
537 if (IS_ERR(match))
538 return PTR_ERR(match);
539
540 m->u.kernel.match = match;
541
542 ret = check_match(m, par);
543 if (ret)
544 goto err;
545
546 return 0;
547 err:
548 module_put(m->u.kernel.match->me);
549 return ret;
550 }
551
552 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
553 {
554 struct xt_entry_target *t = ip6t_get_target(e);
555 struct xt_tgchk_param par = {
556 .net = net,
557 .table = name,
558 .entryinfo = e,
559 .target = t->u.kernel.target,
560 .targinfo = t->data,
561 .hook_mask = e->comefrom,
562 .family = NFPROTO_IPV6,
563 };
564
565 t = ip6t_get_target(e);
566 return xt_check_target(&par, t->u.target_size - sizeof(*t),
567 e->ipv6.proto,
568 e->ipv6.invflags & IP6T_INV_PROTO);
569 }
570
571 static int
572 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
573 unsigned int size,
574 struct xt_percpu_counter_alloc_state *alloc_state)
575 {
576 struct xt_entry_target *t;
577 struct xt_target *target;
578 int ret;
579 unsigned int j;
580 struct xt_mtchk_param mtpar;
581 struct xt_entry_match *ematch;
582
583 if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
584 return -ENOMEM;
585
586 j = 0;
587 mtpar.net = net;
588 mtpar.table = name;
589 mtpar.entryinfo = &e->ipv6;
590 mtpar.hook_mask = e->comefrom;
591 mtpar.family = NFPROTO_IPV6;
592 xt_ematch_foreach(ematch, e) {
593 ret = find_check_match(ematch, &mtpar);
594 if (ret != 0)
595 goto cleanup_matches;
596 ++j;
597 }
598
599 t = ip6t_get_target(e);
600 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
601 t->u.user.revision);
602 if (IS_ERR(target)) {
603 ret = PTR_ERR(target);
604 goto cleanup_matches;
605 }
606 t->u.kernel.target = target;
607
608 ret = check_target(e, net, name);
609 if (ret)
610 goto err;
611 return 0;
612 err:
613 module_put(t->u.kernel.target->me);
614 cleanup_matches:
615 xt_ematch_foreach(ematch, e) {
616 if (j-- == 0)
617 break;
618 cleanup_match(ematch, net);
619 }
620
621 xt_percpu_counter_free(&e->counters);
622
623 return ret;
624 }
625
626 static bool check_underflow(const struct ip6t_entry *e)
627 {
628 const struct xt_entry_target *t;
629 unsigned int verdict;
630
631 if (!unconditional(e))
632 return false;
633 t = ip6t_get_target_c(e);
634 if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
635 return false;
636 verdict = ((struct xt_standard_target *)t)->verdict;
637 verdict = -verdict - 1;
638 return verdict == NF_DROP || verdict == NF_ACCEPT;
639 }
640
641 static int
642 check_entry_size_and_hooks(struct ip6t_entry *e,
643 struct xt_table_info *newinfo,
644 const unsigned char *base,
645 const unsigned char *limit,
646 const unsigned int *hook_entries,
647 const unsigned int *underflows,
648 unsigned int valid_hooks)
649 {
650 unsigned int h;
651 int err;
652
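	/* The entry must be properly aligned, and both its header and its
	 * full next_offset span must lie inside the table blob.
	 */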
653 if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
654 (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
655 (unsigned char *)e + e->next_offset > limit)
656 return -EINVAL;
657
658 if (e->next_offset
659 < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target))
660 return -EINVAL;
661
662 if (!ip6_checkentry(&e->ipv6))
663 return -EINVAL;
664
665 err = xt_check_entry_offsets(e, e->elems, e->target_offset,
666 e->next_offset);
667 if (err)
668 return err;
669
670 /* Check hooks & underflows */
671 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
672 if (!(valid_hooks & (1 << h)))
673 continue;
674 if ((unsigned char *)e - base == hook_entries[h])
675 newinfo->hook_entry[h] = hook_entries[h];
676 if ((unsigned char *)e - base == underflows[h]) {
677 if (!check_underflow(e))
678 return -EINVAL;
679
680 newinfo->underflow[h] = underflows[h];
681 }
682 }
683
684 /* Clear counters and comefrom */
685 e->counters = ((struct xt_counters) { 0, 0 });
686 e->comefrom = 0;
687 return 0;
688 }
689
690 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
691 {
692 struct xt_tgdtor_param par;
693 struct xt_entry_target *t;
694 struct xt_entry_match *ematch;
695
696 /* Cleanup all matches */
697 xt_ematch_foreach(ematch, e)
698 cleanup_match(ematch, net);
699 t = ip6t_get_target(e);
700
701 par.net = net;
702 par.target = t->u.kernel.target;
703 par.targinfo = t->data;
704 par.family = NFPROTO_IPV6;
705 if (par.target->destroy != NULL)
706 par.target->destroy(&par);
707 module_put(par.target->me);
708 xt_percpu_counter_free(&e->counters);
709 }
710
711 /* Checks and translates the user-supplied table segment (held in
712 newinfo) */
713 static int
714 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
715 const struct ip6t_replace *repl)
716 {
717 struct xt_percpu_counter_alloc_state alloc_state = { 0 };
718 struct ip6t_entry *iter;
719 unsigned int *offsets;
720 unsigned int i;
721 int ret = 0;
722
723 newinfo->size = repl->size;
724 newinfo->number = repl->num_entries;
725
726 /* Init all hooks to impossible value. */
727 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
728 newinfo->hook_entry[i] = 0xFFFFFFFF;
729 newinfo->underflow[i] = 0xFFFFFFFF;
730 }
731
732 offsets = xt_alloc_entry_offsets(newinfo->number);
733 if (!offsets)
734 return -ENOMEM;
735 i = 0;
736 /* Walk through entries, checking offsets. */
737 xt_entry_foreach(iter, entry0, newinfo->size) {
738 ret = check_entry_size_and_hooks(iter, newinfo, entry0,
739 entry0 + repl->size,
740 repl->hook_entry,
741 repl->underflow,
742 repl->valid_hooks);
743 if (ret != 0)
744 goto out_free;
745 if (i < repl->num_entries)
746 offsets[i] = (void *)iter - entry0;
747 ++i;
748 if (strcmp(ip6t_get_target(iter)->u.user.name,
749 XT_ERROR_TARGET) == 0)
750 ++newinfo->stacksize;
751 }
752
753 ret = -EINVAL;
754 if (i != repl->num_entries)
755 goto out_free;
756
757 /* Check hooks all assigned */
758 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
759 /* Only hooks which are valid */
760 if (!(repl->valid_hooks & (1 << i)))
761 continue;
762 if (newinfo->hook_entry[i] == 0xFFFFFFFF)
763 goto out_free;
764 if (newinfo->underflow[i] == 0xFFFFFFFF)
765 goto out_free;
766 }
767
768 if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
769 ret = -ELOOP;
770 goto out_free;
771 }
772 kvfree(offsets);
773
774 /* Finally, each sanity check must pass */
775 i = 0;
776 xt_entry_foreach(iter, entry0, newinfo->size) {
777 ret = find_check_entry(iter, net, repl->name, repl->size,
778 &alloc_state);
779 if (ret != 0)
780 break;
781 ++i;
782 }
783
784 if (ret != 0) {
785 xt_entry_foreach(iter, entry0, newinfo->size) {
786 if (i-- == 0)
787 break;
788 cleanup_entry(iter, net);
789 }
790 return ret;
791 }
792
793 return ret;
794 out_free:
795 kvfree(offsets);
796 return ret;
797 }
798
799 static void
800 get_counters(const struct xt_table_info *t,
801 struct xt_counters counters[])
802 {
803 struct ip6t_entry *iter;
804 unsigned int cpu;
805 unsigned int i;
806
807 for_each_possible_cpu(cpu) {
808 seqcount_t *s = &per_cpu(xt_recseq, cpu);
809
810 i = 0;
811 xt_entry_foreach(iter, t->entries, t->size) {
812 struct xt_counters *tmp;
813 u64 bcnt, pcnt;
814 unsigned int start;
815
816 tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
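			/* Retry loop pairs with xt_write_recseq_begin() in the
			 * packet path so the byte/packet pair is read consistently.
			 */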
817 do {
818 start = read_seqcount_begin(s);
819 bcnt = tmp->bcnt;
820 pcnt = tmp->pcnt;
821 } while (read_seqcount_retry(s, start));
822
823 ADD_COUNTER(counters[i], bcnt, pcnt);
824 ++i;
825 }
826 }
827 }
828
829 static struct xt_counters *alloc_counters(const struct xt_table *table)
830 {
831 unsigned int countersize;
832 struct xt_counters *counters;
833 const struct xt_table_info *private = table->private;
834
835 /* We need atomic snapshot of counters: rest doesn't change
836 (other than comefrom, which userspace doesn't care
837 about). */
838 countersize = sizeof(struct xt_counters) * private->number;
839 counters = vzalloc(countersize);
840
841 if (counters == NULL)
842 return ERR_PTR(-ENOMEM);
843
844 get_counters(private, counters);
845
846 return counters;
847 }
848
849 static int
850 copy_entries_to_user(unsigned int total_size,
851 const struct xt_table *table,
852 void __user *userptr)
853 {
854 unsigned int off, num;
855 const struct ip6t_entry *e;
856 struct xt_counters *counters;
857 const struct xt_table_info *private = table->private;
858 int ret = 0;
859 const void *loc_cpu_entry;
860
861 counters = alloc_counters(table);
862 if (IS_ERR(counters))
863 return PTR_ERR(counters);
864
865 loc_cpu_entry = private->entries;
866 if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
867 ret = -EFAULT;
868 goto free_counters;
869 }
870
871 /* FIXME: use iterator macros --RR */
872 /* ... then go back and fix counters and names */
873 for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
874 unsigned int i;
875 const struct xt_entry_match *m;
876 const struct xt_entry_target *t;
877
878 e = (struct ip6t_entry *)(loc_cpu_entry + off);
879 if (copy_to_user(userptr + off
880 + offsetof(struct ip6t_entry, counters),
881 &counters[num],
882 sizeof(counters[num])) != 0) {
883 ret = -EFAULT;
884 goto free_counters;
885 }
886
887 for (i = sizeof(struct ip6t_entry);
888 i < e->target_offset;
889 i += m->u.match_size) {
890 m = (void *)e + i;
891
892 if (copy_to_user(userptr + off + i
893 + offsetof(struct xt_entry_match,
894 u.user.name),
895 m->u.kernel.match->name,
896 strlen(m->u.kernel.match->name)+1)
897 != 0) {
898 ret = -EFAULT;
899 goto free_counters;
900 }
901 }
902
903 t = ip6t_get_target_c(e);
904 if (copy_to_user(userptr + off + e->target_offset
905 + offsetof(struct xt_entry_target,
906 u.user.name),
907 t->u.kernel.target->name,
908 strlen(t->u.kernel.target->name)+1) != 0) {
909 ret = -EFAULT;
910 goto free_counters;
911 }
912 }
913
914 free_counters:
915 vfree(counters);
916 return ret;
917 }
918
919 #ifdef CONFIG_COMPAT
920 static void compat_standard_from_user(void *dst, const void *src)
921 {
922 int v = *(compat_int_t *)src;
923
924 if (v > 0)
925 v += xt_compat_calc_jump(AF_INET6, v);
926 memcpy(dst, &v, sizeof(v));
927 }
928
929 static int compat_standard_to_user(void __user *dst, const void *src)
930 {
931 compat_int_t cv = *(int *)src;
932
933 if (cv > 0)
934 cv -= xt_compat_calc_jump(AF_INET6, cv);
935 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
936 }
937
938 static int compat_calc_entry(const struct ip6t_entry *e,
939 const struct xt_table_info *info,
940 const void *base, struct xt_table_info *newinfo)
941 {
942 const struct xt_entry_match *ematch;
943 const struct xt_entry_target *t;
944 unsigned int entry_offset;
945 int off, i, ret;
946
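	/* 'off' accumulates how much larger this entry is in the native
	 * layout than in the 32-bit compat layout (header, matches, target).
	 */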
947 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
948 entry_offset = (void *)e - base;
949 xt_ematch_foreach(ematch, e)
950 off += xt_compat_match_offset(ematch->u.kernel.match);
951 t = ip6t_get_target_c(e);
952 off += xt_compat_target_offset(t->u.kernel.target);
953 newinfo->size -= off;
954 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
955 if (ret)
956 return ret;
957
958 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
959 if (info->hook_entry[i] &&
960 (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
961 newinfo->hook_entry[i] -= off;
962 if (info->underflow[i] &&
963 (e < (struct ip6t_entry *)(base + info->underflow[i])))
964 newinfo->underflow[i] -= off;
965 }
966 return 0;
967 }
968
969 static int compat_table_info(const struct xt_table_info *info,
970 struct xt_table_info *newinfo)
971 {
972 struct ip6t_entry *iter;
973 const void *loc_cpu_entry;
974 int ret;
975
976 if (!newinfo || !info)
977 return -EINVAL;
978
979 /* we don't care about newinfo->entries */
980 memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
981 newinfo->initial_entries = 0;
982 loc_cpu_entry = info->entries;
983 xt_compat_init_offsets(AF_INET6, info->number);
984 xt_entry_foreach(iter, loc_cpu_entry, info->size) {
985 ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
986 if (ret != 0)
987 return ret;
988 }
989 return 0;
990 }
991 #endif
992
993 static int get_info(struct net *net, void __user *user,
994 const int *len, int compat)
995 {
996 char name[XT_TABLE_MAXNAMELEN];
997 struct xt_table *t;
998 int ret;
999
1000 if (*len != sizeof(struct ip6t_getinfo))
1001 return -EINVAL;
1002
1003 if (copy_from_user(name, user, sizeof(name)) != 0)
1004 return -EFAULT;
1005
1006 name[XT_TABLE_MAXNAMELEN-1] = '\0';
1007 #ifdef CONFIG_COMPAT
1008 if (compat)
1009 xt_compat_lock(AF_INET6);
1010 #endif
1011 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1012 "ip6table_%s", name);
1013 if (!IS_ERR_OR_NULL(t)) {
1014 struct ip6t_getinfo info;
1015 const struct xt_table_info *private = t->private;
1016 #ifdef CONFIG_COMPAT
1017 struct xt_table_info tmp;
1018
1019 if (compat) {
1020 ret = compat_table_info(private, &tmp);
1021 xt_compat_flush_offsets(AF_INET6);
1022 private = &tmp;
1023 }
1024 #endif
1025 memset(&info, 0, sizeof(info));
1026 info.valid_hooks = t->valid_hooks;
1027 memcpy(info.hook_entry, private->hook_entry,
1028 sizeof(info.hook_entry));
1029 memcpy(info.underflow, private->underflow,
1030 sizeof(info.underflow));
1031 info.num_entries = private->number;
1032 info.size = private->size;
1033 strcpy(info.name, name);
1034
1035 if (copy_to_user(user, &info, *len) != 0)
1036 ret = -EFAULT;
1037 else
1038 ret = 0;
1039
1040 xt_table_unlock(t);
1041 module_put(t->me);
1042 } else
1043 ret = t ? PTR_ERR(t) : -ENOENT;
1044 #ifdef CONFIG_COMPAT
1045 if (compat)
1046 xt_compat_unlock(AF_INET6);
1047 #endif
1048 return ret;
1049 }
1050
1051 static int
1052 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1053 const int *len)
1054 {
1055 int ret;
1056 struct ip6t_get_entries get;
1057 struct xt_table *t;
1058
1059 if (*len < sizeof(get))
1060 return -EINVAL;
1061 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1062 return -EFAULT;
1063 if (*len != sizeof(struct ip6t_get_entries) + get.size)
1064 return -EINVAL;
1065
1066 get.name[sizeof(get.name) - 1] = '\0';
1067
1068 t = xt_find_table_lock(net, AF_INET6, get.name);
1069 if (!IS_ERR_OR_NULL(t)) {
1070 struct xt_table_info *private = t->private;
1071 if (get.size == private->size)
1072 ret = copy_entries_to_user(private->size,
1073 t, uptr->entrytable);
1074 else
1075 ret = -EAGAIN;
1076
1077 module_put(t->me);
1078 xt_table_unlock(t);
1079 } else
1080 ret = t ? PTR_ERR(t) : -ENOENT;
1081
1082 return ret;
1083 }
1084
1085 static int
1086 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1087 struct xt_table_info *newinfo, unsigned int num_counters,
1088 void __user *counters_ptr)
1089 {
1090 int ret;
1091 struct xt_table *t;
1092 struct xt_table_info *oldinfo;
1093 struct xt_counters *counters;
1094 struct ip6t_entry *iter;
1095
1096 ret = 0;
1097 counters = vzalloc(num_counters * sizeof(struct xt_counters));
1098 if (!counters) {
1099 ret = -ENOMEM;
1100 goto out;
1101 }
1102
1103 t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1104 "ip6table_%s", name);
1105 if (IS_ERR_OR_NULL(t)) {
1106 ret = t ? PTR_ERR(t) : -ENOENT;
1107 goto free_newinfo_counters_untrans;
1108 }
1109
1110 /* You lied! */
1111 if (valid_hooks != t->valid_hooks) {
1112 ret = -EINVAL;
1113 goto put_module;
1114 }
1115
1116 oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1117 if (!oldinfo)
1118 goto put_module;
1119
1120 /* Update module usage count based on number of rules */
1121 if ((oldinfo->number > oldinfo->initial_entries) ||
1122 (newinfo->number <= oldinfo->initial_entries))
1123 module_put(t->me);
1124 if ((oldinfo->number > oldinfo->initial_entries) &&
1125 (newinfo->number <= oldinfo->initial_entries))
1126 module_put(t->me);
1127
1128 /* Get the old counters, and synchronize with replace */
1129 get_counters(oldinfo, counters);
1130
1131 /* Decrease module usage counts and free resource */
1132 xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
1133 cleanup_entry(iter, net);
1134
1135 xt_free_table_info(oldinfo);
1136 if (copy_to_user(counters_ptr, counters,
1137 sizeof(struct xt_counters) * num_counters) != 0) {
1138 /* Silent error, can't fail, new table is already in place */
1139 net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n");
1140 }
1141 vfree(counters);
1142 xt_table_unlock(t);
1143 return ret;
1144
1145 put_module:
1146 module_put(t->me);
1147 xt_table_unlock(t);
1148 free_newinfo_counters_untrans:
1149 vfree(counters);
1150 out:
1151 return ret;
1152 }
1153
1154 static int
1155 do_replace(struct net *net, const void __user *user, unsigned int len)
1156 {
1157 int ret;
1158 struct ip6t_replace tmp;
1159 struct xt_table_info *newinfo;
1160 void *loc_cpu_entry;
1161 struct ip6t_entry *iter;
1162
1163 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1164 return -EFAULT;
1165
1166 /* overflow check */
1167 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1168 return -ENOMEM;
1169 if (tmp.num_counters == 0)
1170 return -EINVAL;
1171
1172 tmp.name[sizeof(tmp.name)-1] = 0;
1173
1174 newinfo = xt_alloc_table_info(tmp.size);
1175 if (!newinfo)
1176 return -ENOMEM;
1177
1178 loc_cpu_entry = newinfo->entries;
1179 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1180 tmp.size) != 0) {
1181 ret = -EFAULT;
1182 goto free_newinfo;
1183 }
1184
1185 ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1186 if (ret != 0)
1187 goto free_newinfo;
1188
1189 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1190 tmp.num_counters, tmp.counters);
1191 if (ret)
1192 goto free_newinfo_untrans;
1193 return 0;
1194
1195 free_newinfo_untrans:
1196 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1197 cleanup_entry(iter, net);
1198 free_newinfo:
1199 xt_free_table_info(newinfo);
1200 return ret;
1201 }
1202
1203 static int
1204 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1205 int compat)
1206 {
1207 unsigned int i;
1208 struct xt_counters_info tmp;
1209 struct xt_counters *paddc;
1210 struct xt_table *t;
1211 const struct xt_table_info *private;
1212 int ret = 0;
1213 struct ip6t_entry *iter;
1214 unsigned int addend;
1215
1216 paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
1217 if (IS_ERR(paddc))
1218 return PTR_ERR(paddc);
1219 t = xt_find_table_lock(net, AF_INET6, tmp.name);
1220 if (IS_ERR_OR_NULL(t)) {
1221 ret = t ? PTR_ERR(t) : -ENOENT;
1222 goto free;
1223 }
1224
1225 local_bh_disable();
1226 private = t->private;
1227 if (private->number != tmp.num_counters) {
1228 ret = -EINVAL;
1229 goto unlock_up_free;
1230 }
1231
1232 i = 0;
1233 addend = xt_write_recseq_begin();
1234 xt_entry_foreach(iter, private->entries, private->size) {
1235 struct xt_counters *tmp;
1236
1237 tmp = xt_get_this_cpu_counter(&iter->counters);
1238 ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
1239 ++i;
1240 }
1241 xt_write_recseq_end(addend);
1242 unlock_up_free:
1243 local_bh_enable();
1244 xt_table_unlock(t);
1245 module_put(t->me);
1246 free:
1247 vfree(paddc);
1248
1249 return ret;
1250 }
1251
1252 #ifdef CONFIG_COMPAT
1253 struct compat_ip6t_replace {
1254 char name[XT_TABLE_MAXNAMELEN];
1255 u32 valid_hooks;
1256 u32 num_entries;
1257 u32 size;
1258 u32 hook_entry[NF_INET_NUMHOOKS];
1259 u32 underflow[NF_INET_NUMHOOKS];
1260 u32 num_counters;
1261 compat_uptr_t counters; /* struct xt_counters * */
1262 struct compat_ip6t_entry entries[0];
1263 };
1264
1265 static int
1266 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1267 unsigned int *size, struct xt_counters *counters,
1268 unsigned int i)
1269 {
1270 struct xt_entry_target *t;
1271 struct compat_ip6t_entry __user *ce;
1272 u_int16_t target_offset, next_offset;
1273 compat_uint_t origsize;
1274 const struct xt_entry_match *ematch;
1275 int ret = 0;
1276
1277 origsize = *size;
1278 ce = (struct compat_ip6t_entry __user *)*dstptr;
1279 if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1280 copy_to_user(&ce->counters, &counters[i],
1281 sizeof(counters[i])) != 0)
1282 return -EFAULT;
1283
1284 *dstptr += sizeof(struct compat_ip6t_entry);
1285 *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1286
1287 xt_ematch_foreach(ematch, e) {
1288 ret = xt_compat_match_to_user(ematch, dstptr, size);
1289 if (ret != 0)
1290 return ret;
1291 }
1292 target_offset = e->target_offset - (origsize - *size);
1293 t = ip6t_get_target(e);
1294 ret = xt_compat_target_to_user(t, dstptr, size);
1295 if (ret)
1296 return ret;
1297 next_offset = e->next_offset - (origsize - *size);
1298 if (put_user(target_offset, &ce->target_offset) != 0 ||
1299 put_user(next_offset, &ce->next_offset) != 0)
1300 return -EFAULT;
1301 return 0;
1302 }
1303
1304 static int
1305 compat_find_calc_match(struct xt_entry_match *m,
1306 const struct ip6t_ip6 *ipv6,
1307 int *size)
1308 {
1309 struct xt_match *match;
1310
1311 match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1312 m->u.user.revision);
1313 if (IS_ERR(match))
1314 return PTR_ERR(match);
1315
1316 m->u.kernel.match = match;
1317 *size += xt_compat_match_offset(match);
1318 return 0;
1319 }
1320
1321 static void compat_release_entry(struct compat_ip6t_entry *e)
1322 {
1323 struct xt_entry_target *t;
1324 struct xt_entry_match *ematch;
1325
1326 /* Cleanup all matches */
1327 xt_ematch_foreach(ematch, e)
1328 module_put(ematch->u.kernel.match->me);
1329 t = compat_ip6t_get_target(e);
1330 module_put(t->u.kernel.target->me);
1331 }
1332
1333 static int
1334 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1335 struct xt_table_info *newinfo,
1336 unsigned int *size,
1337 const unsigned char *base,
1338 const unsigned char *limit)
1339 {
1340 struct xt_entry_match *ematch;
1341 struct xt_entry_target *t;
1342 struct xt_target *target;
1343 unsigned int entry_offset;
1344 unsigned int j;
1345 int ret, off;
1346
1347 if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1348 (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
1349 (unsigned char *)e + e->next_offset > limit)
1350 return -EINVAL;
1351
1352 if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1353 sizeof(struct compat_xt_entry_target))
1354 return -EINVAL;
1355
1356 if (!ip6_checkentry(&e->ipv6))
1357 return -EINVAL;
1358
1359 ret = xt_compat_check_entry_offsets(e, e->elems,
1360 e->target_offset, e->next_offset);
1361 if (ret)
1362 return ret;
1363
1364 off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1365 entry_offset = (void *)e - (void *)base;
1366 j = 0;
1367 xt_ematch_foreach(ematch, e) {
1368 ret = compat_find_calc_match(ematch, &e->ipv6, &off);
1369 if (ret != 0)
1370 goto release_matches;
1371 ++j;
1372 }
1373
1374 t = compat_ip6t_get_target(e);
1375 target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1376 t->u.user.revision);
1377 if (IS_ERR(target)) {
1378 ret = PTR_ERR(target);
1379 goto release_matches;
1380 }
1381 t->u.kernel.target = target;
1382
1383 off += xt_compat_target_offset(target);
1384 *size += off;
1385 ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1386 if (ret)
1387 goto out;
1388
1389 return 0;
1390
1391 out:
1392 module_put(t->u.kernel.target->me);
1393 release_matches:
1394 xt_ematch_foreach(ematch, e) {
1395 if (j-- == 0)
1396 break;
1397 module_put(ematch->u.kernel.match->me);
1398 }
1399 return ret;
1400 }
1401
1402 static void
1403 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1404 unsigned int *size,
1405 struct xt_table_info *newinfo, unsigned char *base)
1406 {
1407 struct xt_entry_target *t;
1408 struct ip6t_entry *de;
1409 unsigned int origsize;
1410 int h;
1411 struct xt_entry_match *ematch;
1412
1413 origsize = *size;
1414 de = (struct ip6t_entry *)*dstptr;
1415 memcpy(de, e, sizeof(struct ip6t_entry));
1416 memcpy(&de->counters, &e->counters, sizeof(e->counters));
1417
1418 *dstptr += sizeof(struct ip6t_entry);
1419 *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1420
1421 xt_ematch_foreach(ematch, e)
1422 xt_compat_match_from_user(ematch, dstptr, size);
1423
1424 de->target_offset = e->target_offset - (origsize - *size);
1425 t = compat_ip6t_get_target(e);
1426 xt_compat_target_from_user(t, dstptr, size);
1427
1428 de->next_offset = e->next_offset - (origsize - *size);
1429 for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1430 if ((unsigned char *)de - base < newinfo->hook_entry[h])
1431 newinfo->hook_entry[h] -= origsize - *size;
1432 if ((unsigned char *)de - base < newinfo->underflow[h])
1433 newinfo->underflow[h] -= origsize - *size;
1434 }
1435 }
1436
1437 static int
1438 translate_compat_table(struct net *net,
1439 struct xt_table_info **pinfo,
1440 void **pentry0,
1441 const struct compat_ip6t_replace *compatr)
1442 {
1443 unsigned int i, j;
1444 struct xt_table_info *newinfo, *info;
1445 void *pos, *entry0, *entry1;
1446 struct compat_ip6t_entry *iter0;
1447 struct ip6t_replace repl;
1448 unsigned int size;
1449 int ret = 0;
1450
1451 info = *pinfo;
1452 entry0 = *pentry0;
1453 size = compatr->size;
1454 info->number = compatr->num_entries;
1455
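	/* Two passes: first validate every compat entry and record its size
	 * delta under the compat lock, then copy the rules into a natively
	 * sized blob and let translate_table() run the full checks on it.
	 */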
1456 j = 0;
1457 xt_compat_lock(AF_INET6);
1458 xt_compat_init_offsets(AF_INET6, compatr->num_entries);
1459 /* Walk through entries, checking offsets. */
1460 xt_entry_foreach(iter0, entry0, compatr->size) {
1461 ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1462 entry0,
1463 entry0 + compatr->size);
1464 if (ret != 0)
1465 goto out_unlock;
1466 ++j;
1467 }
1468
1469 ret = -EINVAL;
1470 if (j != compatr->num_entries)
1471 goto out_unlock;
1472
1473 ret = -ENOMEM;
1474 newinfo = xt_alloc_table_info(size);
1475 if (!newinfo)
1476 goto out_unlock;
1477
1478 newinfo->number = compatr->num_entries;
1479 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1480 newinfo->hook_entry[i] = compatr->hook_entry[i];
1481 newinfo->underflow[i] = compatr->underflow[i];
1482 }
1483 entry1 = newinfo->entries;
1484 pos = entry1;
1485 size = compatr->size;
1486 xt_entry_foreach(iter0, entry0, compatr->size)
1487 compat_copy_entry_from_user(iter0, &pos, &size,
1488 newinfo, entry1);
1489
1490 /* all module references in entry0 are now gone. */
1491 xt_compat_flush_offsets(AF_INET6);
1492 xt_compat_unlock(AF_INET6);
1493
1494 memcpy(&repl, compatr, sizeof(*compatr));
1495
1496 for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1497 repl.hook_entry[i] = newinfo->hook_entry[i];
1498 repl.underflow[i] = newinfo->underflow[i];
1499 }
1500
1501 repl.num_counters = 0;
1502 repl.counters = NULL;
1503 repl.size = newinfo->size;
1504 ret = translate_table(net, newinfo, entry1, &repl);
1505 if (ret)
1506 goto free_newinfo;
1507
1508 *pinfo = newinfo;
1509 *pentry0 = entry1;
1510 xt_free_table_info(info);
1511 return 0;
1512
1513 free_newinfo:
1514 xt_free_table_info(newinfo);
1515 return ret;
1516 out_unlock:
1517 xt_compat_flush_offsets(AF_INET6);
1518 xt_compat_unlock(AF_INET6);
1519 xt_entry_foreach(iter0, entry0, compatr->size) {
1520 if (j-- == 0)
1521 break;
1522 compat_release_entry(iter0);
1523 }
1524 return ret;
1525 }
1526
1527 static int
1528 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1529 {
1530 int ret;
1531 struct compat_ip6t_replace tmp;
1532 struct xt_table_info *newinfo;
1533 void *loc_cpu_entry;
1534 struct ip6t_entry *iter;
1535
1536 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1537 return -EFAULT;
1538
1539 /* overflow check */
1540 if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1541 return -ENOMEM;
1542 if (tmp.num_counters == 0)
1543 return -EINVAL;
1544
1545 tmp.name[sizeof(tmp.name)-1] = 0;
1546
1547 newinfo = xt_alloc_table_info(tmp.size);
1548 if (!newinfo)
1549 return -ENOMEM;
1550
1551 loc_cpu_entry = newinfo->entries;
1552 if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1553 tmp.size) != 0) {
1554 ret = -EFAULT;
1555 goto free_newinfo;
1556 }
1557
1558 ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
1559 if (ret != 0)
1560 goto free_newinfo;
1561
1562 ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1563 tmp.num_counters, compat_ptr(tmp.counters));
1564 if (ret)
1565 goto free_newinfo_untrans;
1566 return 0;
1567
1568 free_newinfo_untrans:
1569 xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1570 cleanup_entry(iter, net);
1571 free_newinfo:
1572 xt_free_table_info(newinfo);
1573 return ret;
1574 }
1575
1576 static int
1577 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1578 unsigned int len)
1579 {
1580 int ret;
1581
1582 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1583 return -EPERM;
1584
1585 switch (cmd) {
1586 case IP6T_SO_SET_REPLACE:
1587 ret = compat_do_replace(sock_net(sk), user, len);
1588 break;
1589
1590 case IP6T_SO_SET_ADD_COUNTERS:
1591 ret = do_add_counters(sock_net(sk), user, len, 1);
1592 break;
1593
1594 default:
1595 ret = -EINVAL;
1596 }
1597
1598 return ret;
1599 }
1600
1601 struct compat_ip6t_get_entries {
1602 char name[XT_TABLE_MAXNAMELEN];
1603 compat_uint_t size;
1604 struct compat_ip6t_entry entrytable[0];
1605 };
1606
1607 static int
1608 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1609 void __user *userptr)
1610 {
1611 struct xt_counters *counters;
1612 const struct xt_table_info *private = table->private;
1613 void __user *pos;
1614 unsigned int size;
1615 int ret = 0;
1616 unsigned int i = 0;
1617 struct ip6t_entry *iter;
1618
1619 counters = alloc_counters(table);
1620 if (IS_ERR(counters))
1621 return PTR_ERR(counters);
1622
1623 pos = userptr;
1624 size = total_size;
1625 xt_entry_foreach(iter, private->entries, total_size) {
1626 ret = compat_copy_entry_to_user(iter, &pos,
1627 &size, counters, i++);
1628 if (ret != 0)
1629 break;
1630 }
1631
1632 vfree(counters);
1633 return ret;
1634 }
1635
1636 static int
1637 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1638 int *len)
1639 {
1640 int ret;
1641 struct compat_ip6t_get_entries get;
1642 struct xt_table *t;
1643
1644 if (*len < sizeof(get))
1645 return -EINVAL;
1646
1647 if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1648 return -EFAULT;
1649
1650 if (*len != sizeof(struct compat_ip6t_get_entries) + get.size)
1651 return -EINVAL;
1652
1653 get.name[sizeof(get.name) - 1] = '\0';
1654
1655 xt_compat_lock(AF_INET6);
1656 t = xt_find_table_lock(net, AF_INET6, get.name);
1657 if (!IS_ERR_OR_NULL(t)) {
1658 const struct xt_table_info *private = t->private;
1659 struct xt_table_info info;
1660 ret = compat_table_info(private, &info);
1661 if (!ret && get.size == info.size)
1662 ret = compat_copy_entries_to_user(private->size,
1663 t, uptr->entrytable);
1664 else if (!ret)
1665 ret = -EAGAIN;
1666
1667 xt_compat_flush_offsets(AF_INET6);
1668 module_put(t->me);
1669 xt_table_unlock(t);
1670 } else
1671 ret = t ? PTR_ERR(t) : -ENOENT;
1672
1673 xt_compat_unlock(AF_INET6);
1674 return ret;
1675 }
1676
1677 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1678
1679 static int
1680 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1681 {
1682 int ret;
1683
1684 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1685 return -EPERM;
1686
1687 switch (cmd) {
1688 case IP6T_SO_GET_INFO:
1689 ret = get_info(sock_net(sk), user, len, 1);
1690 break;
1691 case IP6T_SO_GET_ENTRIES:
1692 ret = compat_get_entries(sock_net(sk), user, len);
1693 break;
1694 default:
1695 ret = do_ip6t_get_ctl(sk, cmd, user, len);
1696 }
1697 return ret;
1698 }
1699 #endif
1700
1701 static int
1702 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1703 {
1704 int ret;
1705
1706 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1707 return -EPERM;
1708
1709 switch (cmd) {
1710 case IP6T_SO_SET_REPLACE:
1711 ret = do_replace(sock_net(sk), user, len);
1712 break;
1713
1714 case IP6T_SO_SET_ADD_COUNTERS:
1715 ret = do_add_counters(sock_net(sk), user, len, 0);
1716 break;
1717
1718 default:
1719 ret = -EINVAL;
1720 }
1721
1722 return ret;
1723 }
1724
1725 static int
1726 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1727 {
1728 int ret;
1729
1730 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
1731 return -EPERM;
1732
1733 switch (cmd) {
1734 case IP6T_SO_GET_INFO:
1735 ret = get_info(sock_net(sk), user, len, 0);
1736 break;
1737
1738 case IP6T_SO_GET_ENTRIES:
1739 ret = get_entries(sock_net(sk), user, len);
1740 break;
1741
1742 case IP6T_SO_GET_REVISION_MATCH:
1743 case IP6T_SO_GET_REVISION_TARGET: {
1744 struct xt_get_revision rev;
1745 int target;
1746
1747 if (*len != sizeof(rev)) {
1748 ret = -EINVAL;
1749 break;
1750 }
1751 if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
1752 ret = -EFAULT;
1753 break;
1754 }
1755 rev.name[sizeof(rev.name)-1] = 0;
1756
1757 if (cmd == IP6T_SO_GET_REVISION_TARGET)
1758 target = 1;
1759 else
1760 target = 0;
1761
1762 try_then_request_module(xt_find_revision(AF_INET6, rev.name,
1763 rev.revision,
1764 target, &ret),
1765 "ip6t_%s", rev.name);
1766 break;
1767 }
1768
1769 default:
1770 ret = -EINVAL;
1771 }
1772
1773 return ret;
1774 }
1775
1776 static void __ip6t_unregister_table(struct net *net, struct xt_table *table)
1777 {
1778 struct xt_table_info *private;
1779 void *loc_cpu_entry;
1780 struct module *table_owner = table->me;
1781 struct ip6t_entry *iter;
1782
1783 private = xt_unregister_table(table);
1784
1785 /* Decrease module usage counts and free resources */
1786 loc_cpu_entry = private->entries;
1787 xt_entry_foreach(iter, loc_cpu_entry, private->size)
1788 cleanup_entry(iter, net);
1789 if (private->number > private->initial_entries)
1790 module_put(table_owner);
1791 xt_free_table_info(private);
1792 }
1793
1794 int ip6t_register_table(struct net *net, const struct xt_table *table,
1795 const struct ip6t_replace *repl,
1796 const struct nf_hook_ops *ops,
1797 struct xt_table **res)
1798 {
1799 int ret;
1800 struct xt_table_info *newinfo;
1801 struct xt_table_info bootstrap = {0};
1802 void *loc_cpu_entry;
1803 struct xt_table *new_table;
1804
1805 newinfo = xt_alloc_table_info(repl->size);
1806 if (!newinfo)
1807 return -ENOMEM;
1808
1809 loc_cpu_entry = newinfo->entries;
1810 memcpy(loc_cpu_entry, repl->entries, repl->size);
1811
1812 ret = translate_table(net, newinfo, loc_cpu_entry, repl);
1813 if (ret != 0)
1814 goto out_free;
1815
1816 new_table = xt_register_table(net, table, &bootstrap, newinfo);
1817 if (IS_ERR(new_table)) {
1818 ret = PTR_ERR(new_table);
1819 goto out_free;
1820 }
1821
1822 /* set res now, will see skbs right after nf_register_net_hooks */
1823 WRITE_ONCE(*res, new_table);
1824
1825 ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
1826 if (ret != 0) {
1827 __ip6t_unregister_table(net, new_table);
1828 *res = NULL;
1829 }
1830
1831 return ret;
1832
1833 out_free:
1834 xt_free_table_info(newinfo);
1835 return ret;
1836 }
1837
1838 void ip6t_unregister_table(struct net *net, struct xt_table *table,
1839 const struct nf_hook_ops *ops)
1840 {
1841 nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
1842 __ip6t_unregister_table(net, table);
1843 }
1844
1845 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
1846 static inline bool
1847 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
1848 u_int8_t type, u_int8_t code,
1849 bool invert)
1850 {
1851 return (type == test_type && code >= min_code && code <= max_code)
1852 ^ invert;
1853 }
1854
1855 static bool
1856 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
1857 {
1858 const struct icmp6hdr *ic;
1859 struct icmp6hdr _icmph;
1860 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1861
1862 /* Must not be a fragment. */
1863 if (par->fragoff != 0)
1864 return false;
1865
1866 ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
1867 if (ic == NULL) {
1868 /* We've been asked to examine this packet, and we
1869 * can't. Hence, no choice but to drop.
1870 */
1871 par->hotdrop = true;
1872 return false;
1873 }
1874
1875 return icmp6_type_code_match(icmpinfo->type,
1876 icmpinfo->code[0],
1877 icmpinfo->code[1],
1878 ic->icmp6_type, ic->icmp6_code,
1879 !!(icmpinfo->invflags&IP6T_ICMP_INV));
1880 }
1881
1882 /* Called when user tries to insert an entry of this type. */
1883 static int icmp6_checkentry(const struct xt_mtchk_param *par)
1884 {
1885 const struct ip6t_icmp *icmpinfo = par->matchinfo;
1886
1887 /* Must specify no unknown invflags */
1888 return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
1889 }
1890
1891 /* The built-in targets: standard (NULL) and error. */
1892 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
1893 {
1894 .name = XT_STANDARD_TARGET,
1895 .targetsize = sizeof(int),
1896 .family = NFPROTO_IPV6,
1897 #ifdef CONFIG_COMPAT
1898 .compatsize = sizeof(compat_int_t),
1899 .compat_from_user = compat_standard_from_user,
1900 .compat_to_user = compat_standard_to_user,
1901 #endif
1902 },
1903 {
1904 .name = XT_ERROR_TARGET,
1905 .target = ip6t_error,
1906 .targetsize = XT_FUNCTION_MAXNAMELEN,
1907 .family = NFPROTO_IPV6,
1908 },
1909 };
1910
1911 static struct nf_sockopt_ops ip6t_sockopts = {
1912 .pf = PF_INET6,
1913 .set_optmin = IP6T_BASE_CTL,
1914 .set_optmax = IP6T_SO_SET_MAX+1,
1915 .set = do_ip6t_set_ctl,
1916 #ifdef CONFIG_COMPAT
1917 .compat_set = compat_do_ip6t_set_ctl,
1918 #endif
1919 .get_optmin = IP6T_BASE_CTL,
1920 .get_optmax = IP6T_SO_GET_MAX+1,
1921 .get = do_ip6t_get_ctl,
1922 #ifdef CONFIG_COMPAT
1923 .compat_get = compat_do_ip6t_get_ctl,
1924 #endif
1925 .owner = THIS_MODULE,
1926 };
1927
1928 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
1929 {
1930 .name = "icmp6",
1931 .match = icmp6_match,
1932 .matchsize = sizeof(struct ip6t_icmp),
1933 .checkentry = icmp6_checkentry,
1934 .proto = IPPROTO_ICMPV6,
1935 .family = NFPROTO_IPV6,
1936 },
1937 };
1938
1939 static int __net_init ip6_tables_net_init(struct net *net)
1940 {
1941 return xt_proto_init(net, NFPROTO_IPV6);
1942 }
1943
1944 static void __net_exit ip6_tables_net_exit(struct net *net)
1945 {
1946 xt_proto_fini(net, NFPROTO_IPV6);
1947 }
1948
1949 static struct pernet_operations ip6_tables_net_ops = {
1950 .init = ip6_tables_net_init,
1951 .exit = ip6_tables_net_exit,
1952 };
1953
1954 static int __init ip6_tables_init(void)
1955 {
1956 int ret;
1957
1958 ret = register_pernet_subsys(&ip6_tables_net_ops);
1959 if (ret < 0)
1960 goto err1;
1961
1962 /* No one else will be downing sem now, so we won't sleep */
1963 ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1964 if (ret < 0)
1965 goto err2;
1966 ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1967 if (ret < 0)
1968 goto err4;
1969
1970 /* Register setsockopt */
1971 ret = nf_register_sockopt(&ip6t_sockopts);
1972 if (ret < 0)
1973 goto err5;
1974
1975 pr_info("(C) 2000-2006 Netfilter Core Team\n");
1976 return 0;
1977
1978 err5:
1979 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1980 err4:
1981 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1982 err2:
1983 unregister_pernet_subsys(&ip6_tables_net_ops);
1984 err1:
1985 return ret;
1986 }
1987
1988 static void __exit ip6_tables_fini(void)
1989 {
1990 nf_unregister_sockopt(&ip6t_sockopts);
1991
1992 xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
1993 xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
1994 unregister_pernet_subsys(&ip6_tables_net_ops);
1995 }
1996
1997 EXPORT_SYMBOL(ip6t_register_table);
1998 EXPORT_SYMBOL(ip6t_unregister_table);
1999 EXPORT_SYMBOL(ip6t_do_table);
2000
2001 module_init(ip6_tables_init);
2002 module_exit(ip6_tables_fini);
2003