/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

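/* A hook "blob" is one allocation laid out as:
 *
 *	struct nf_hook_entries            (header, num_hook_entries)
 *	num x struct nf_hook_entry        (hook function + priv, hot path)
 *	num x struct nf_hook_ops *        (bookkeeping, (un)registration only)
 *	1   x struct nf_hook_entries_rcu_head  (used to free the blob via RCU)
 *
 * Everything behind the entries array is reached via
 * nf_hook_entries_get_hook_ops() and pointer arithmetic.
 */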
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL);
	if (e)
		e->num_hook_entries = num;
	return e;
}

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

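/* Free a blob after the RCU grace period.  The rcu_head lives in the tail
 * of the allocation itself (right behind the ops pointer array), so no
 * extra memory is needed to defer the kvfree().
 */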
static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

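/* Build a new blob with room for one more hook.  Live hooks are copied
 * from @old in order (dummy placeholders left behind by unregistration are
 * dropped) and @reg is inserted so that the result stays sorted by
 * ascending priority.  Returns the new blob or an ERR_PTR(); @old is left
 * untouched.
 */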
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

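/* Like __nf_register_net_hook(), but for hook blobs that are not covered
 * by nf_hook_mutex; the caller is responsible for update-side exclusion,
 * hence the raw dereference of @pp.
 */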
int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
			       const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that will just move to the next hook.
 *
 * This counts the current dummy hooks, attempts to allocate a new blob,
 * copies the live hooks, then replaces and discards the old one.
 *
 * return values:
 *
 * Returns address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

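/* Map (pf, hooknum, dev) to the location of the corresponding hook blob
 * pointer: the per-netns arrays for IPv4/IPv6/ARP/bridge, or the
 * per-device ingress pointer for NFPROTO_NETDEV.
 */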
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_bridge) <= hooknum))
			return NULL;
		return net->nf.hooks_bridge + hooknum;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

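/* Register @reg on one concrete family: grow the blob under nf_hook_mutex,
 * publish it with rcu_assign_pointer(), then free the old blob after a
 * grace period.  Also bumps the ingress queue count and the nf_hooks_needed
 * static key so callers can cheaply detect that hooks are present.
 */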
static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;

	if (pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
		if (reg->hooknum != NF_NETDEV_INGRESS ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
#endif
	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail, hook unregistration must always succeed.
 * Therefore replace the to-be-removed hook with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], &dummy_ops);
		return true;
	}

	return false;
}

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_JUMP_LABEL
		static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]);
#endif
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
		__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

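/* Raw counterpart of nf_unregister_net_hook() for blobs not protected by
 * nf_hook_mutex; as with nf_hook_entries_insert_raw(), the caller provides
 * the update-side exclusion.
 */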
void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

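/* NFPROTO_INET is not a real hook family at this level: register the hook
 * on both IPv4 and IPv6, rolling back the IPv4 registration if the IPv6
 * one fails.
 */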
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
		if (err < 0)
			return err;

		err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
		if (err < 0) {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			return err;
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb(skb);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any
			 * other non-conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);

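/* Rough sketch of the usual calling convention (most protocols go through
 * the NF_HOOK() and nf_hook() helpers in <linux/netfilter.h>, which do
 * essentially this; names below are illustrative):
 *
 *	int ret = 1;
 *
 *	rcu_read_lock();
 *	e = rcu_dereference(net->nf.hooks_ipv4[NF_INET_LOCAL_OUT]);
 *	if (e) {
 *		nf_hook_state_init(&state, NF_INET_LOCAL_OUT, NFPROTO_IPV4,
 *				   indev, outdev, sk, net, okfn);
 *		ret = nf_hook_slow(skb, &state, e, 0);
 *	}
 *	rcu_read_unlock();
 *	if (ret == 1)
 *		okfn(net, sk, skb);	// every hook returned NF_ACCEPT
 *	// ret == 0: skb stolen or queued; ret < 0: dropped, skb already freed
 */
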
/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, the connection may not be in
 * the hash table, and hence manufactured ICMP or RST packets will not be
 * associated with it.
 */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

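/* Associate @new (typically a locally generated ICMP error or TCP RST) with
 * the conntrack entry of the packet that triggered it.  ip_ct_attach is
 * populated by the conntrack module when it is loaded; otherwise this is a
 * no-op.
 */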
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->destroy(nfct);
	rcu_read_unlock();

	WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(net->nf.hooks_bridge, ARRAY_SIZE(net->nf.hooks_bridge));
#endif
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}