/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries __rcu *init_nf_hooks_bridge[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu **init_nf_hooks_bridgep = &init_nf_hooks_bridge[0];
EXPORT_SYMBOL_GPL(init_nf_hooks_bridgep);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT	1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

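/* Allocate one blob holding the hook entries, the matching array of
 * nf_hook_ops pointers and a trailing rcu_head, laid out back to back
 * in a single kvzalloc'd allocation; nf_hook_entries_free() relies on
 * this layout for the deferred free.
 */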
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL);
	if (e)
		e->num_hook_entries = num;
	return e;
}

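/* The rcu_head stored behind the ops pointer array lets the whole blob
 * be freed after a grace period without any extra allocation.
 */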
static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

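/* Copy the live hooks from @old (skipping dummy entries left behind by
 * unregistration) into a freshly allocated blob, inserting @reg at the
 * position dictated by its priority; higher priority values run later.
 */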
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

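/* Like the registration path below, but for callers that serialize
 * updates themselves: the blob is read with rcu_dereference_raw()
 * rather than under nf_hook_mutex.
 */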
int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
			       const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old: current hook blob at @pp
 * @pp: location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that simply moves on to the next hook.
 *
 * This counts the current dummy hooks, allocates a new blob, copies
 * the live hooks into it, then publishes the new blob and discards
 * the old one.
 *
 * Returns the address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

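/* Map (pf, hooknum, dev) to the location of the hook blob pointer:
 * the per-netns arrays for IPv4/IPv6/ARP/bridge, or the net_device
 * itself for ingress hooks.
 */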
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(hooknum >= NF_INET_NUMHOOKS))
			return NULL;
		return get_nf_hooks_bridge(net) + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

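/* With CONFIG_JUMP_LABEL, the nf_hooks_needed static keys let the hook
 * fast path be patched in and out; NFPROTO_INET ingress hooks are
 * accounted under NFPROTO_NETDEV/NF_NETDEV_INGRESS, where they actually
 * run.
 */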
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}

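/* Register @reg on a single family chain: validate ingress hooks, grow
 * the blob under nf_hook_mutex, publish it via rcu_assign_pointer() and
 * free the old blob after a grace period.
 */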
static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
		err = nf_ingress_check(net, reg, NF_NETDEV_INGRESS);
		if (err < 0)
			return err;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @old: current hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail: hook unregistration must always succeed, so the
 * to-be-removed hook is replaced with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}

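/* Unregister @reg from a single family chain: swap in the dummy hook,
 * drop the ingress/static-key accounting, then try to shrink the blob
 * and flush packets queued via nf_queue before freeing the old copy.
 */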
static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

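/* NFPROTO_INET is not a real chain: except for NF_INET_INGRESS, such a
 * hook is registered on both the IPv4 and IPv6 chains, and is backed
 * out of the IPv4 chain again if the IPv6 registration fails.
 */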
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb_reason(skb,
					 SKB_DROP_REASON_NETFILTER_DROP);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);

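/* Run the hooks for every skb on @head; packets that pass (return value
 * 1 from nf_hook_slow()) are collected and spliced back onto @head,
 * while dropped or stolen ones simply disappear from the list.
 */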
void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, the connection may not be
 * in the hash table, and hence manufactured ICMP or RST packets will not
 * be associated with it.
 */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

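/* Attach the conntrack of @skb, if any, to @new via the registered
 * ip_ct_attach callback; used for the locally generated ICMP errors
 * and RSTs mentioned above.
 */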
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->destroy(nfct);
	rcu_read_unlock();

	WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(get_nf_hooks_bridge(net), NF_INET_NUMHOOKS);
#endif
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}