/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * This code is GPL.
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_queue.h>
#include <net/sock.h>

#include "nf_internals.h"

const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);

#ifdef CONFIG_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
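
/* Illustrative note (not part of the original file): with CONFIG_JUMP_LABEL,
 * the nf_hook() fast path in <linux/netfilter.h> can use these keys to skip
 * hook evaluation entirely while nothing is registered, roughly:
 *
 *	if (__builtin_constant_p(pf) && __builtin_constant_p(hook) &&
 *	    !static_key_false(&nf_hooks_needed[pf][hook]))
 *		return 1;	// no hooks registered, accept immediately
 *
 * nf_static_key_inc()/nf_static_key_dec() below flip these keys as hooks
 * are added and removed.
 */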

#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
struct nf_hook_entries __rcu *init_nf_hooks_bridge[NF_INET_NUMHOOKS];
struct nf_hook_entries __rcu **init_nf_hooks_bridgep = &init_nf_hooks_bridge[0];
EXPORT_SYMBOL_GPL(init_nf_hooks_bridgep);
#endif

static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))

static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc = sizeof(*e) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(struct nf_hook_ops *) * num +
		       sizeof(struct nf_hook_entries_rcu_head);

	if (num == 0)
		return NULL;

	e = kvzalloc(alloc, GFP_KERNEL_ACCOUNT);
	if (e)
		e->num_hook_entries = num;
	return e;
}
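
/* Added note (layout implied by the size computation above): the hook blob is
 * one allocation laid out as
 *
 *	struct nf_hook_entries           header (num_hook_entries)
 *	struct nf_hook_entry[num]        hooks[] - what the packet path walks
 *	struct nf_hook_ops *[num]        ops[]   - only used at (un)register time
 *	struct nf_hook_entries_rcu_head  tail    - reused to kvfree() the blob
 *
 * nf_hook_entries_get_hook_ops() returns a pointer just past hooks[], and
 * nf_hook_entries_free() below finds the rcu head right after ops[num].
 */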

static void __nf_hook_entries_free(struct rcu_head *h)
{
	struct nf_hook_entries_rcu_head *head;

	head = container_of(h, struct nf_hook_entries_rcu_head, head);
	kvfree(head->allocation);
}

static void nf_hook_entries_free(struct nf_hook_entries *e)
{
	struct nf_hook_entries_rcu_head *head;
	struct nf_hook_ops **ops;
	unsigned int num;

	if (!e)
		return;

	num = e->num_hook_entries;
	ops = nf_hook_entries_get_hook_ops(e);
	head = (void *)&ops[num];
	head->allocation = e;
	call_rcu(&head->head, __nf_hook_entries_free);
}

static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};

static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_MISC
	struct nf_hook_ops **orig_ops;
	int prio = INT_MIN;
	size_t i = 0;

	orig_ops = nf_hook_entries_get_hook_ops(hooks);

	for (i = 0; i < hooks->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;

		WARN_ON(orig_ops[i]->priority < prio);

		if (orig_ops[i]->priority > prio)
			prio = orig_ops[i]->priority;
	}
#endif
}

int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *new_hooks;
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);

	rcu_assign_pointer(*pp, new_hooks);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_hook_entries_insert_raw);

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @old -- current hook blob at @pp
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that will just move to next hook.
 *
 * This counts the current dummy hooks, attempts to allocate new blob,
 * copies the live hooks, then replaces and discards old one.
 *
 * return values:
 *
 * Returns address to free, or NULL.
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries *old,
					  struct nf_hook_entries __rcu **pp)
{
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_entries *new = NULL;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;

	if (skip == 0)
		return NULL;

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;

	new_ops = nf_hook_entries_get_hook_ops(new);
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
		   struct net_device *dev)
{
	switch (pf) {
	case NFPROTO_NETDEV:
		break;
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	case NFPROTO_ARP:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_arp) <= hooknum))
			return NULL;
		return net->nf.hooks_arp + hooknum;
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		if (WARN_ON_ONCE(hooknum >= NF_INET_NUMHOOKS))
			return NULL;
		return get_nf_hooks_bridge(net) + hooknum;
#endif
#ifdef CONFIG_NETFILTER_INGRESS
	case NFPROTO_INET:
		if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
			return NULL;
		if (!dev || dev_net(dev) != net) {
			WARN_ON_ONCE(1);
			return NULL;
		}
		return &dev->nf_hooks_ingress;
#endif
	case NFPROTO_IPV4:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv4 + hooknum;
	case NFPROTO_IPV6:
		if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv6) <= hooknum))
			return NULL;
		return net->nf.hooks_ipv6 + hooknum;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

#ifdef CONFIG_NETFILTER_INGRESS
	if (hooknum == NF_NETDEV_INGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_ingress;
	}
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (hooknum == NF_NETDEV_EGRESS) {
		if (dev && dev_net(dev) == net)
			return &dev->nf_hooks_egress;
	}
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
			    int hooknum)
{
#ifndef CONFIG_NETFILTER_INGRESS
	if (reg->hooknum == hooknum)
		return -EOPNOTSUPP;
#endif
	if (reg->hooknum != hooknum ||
	    !reg->dev || dev_net(reg->dev) != net)
		return -EINVAL;

	return 0;
}

static inline bool __maybe_unused nf_ingress_hook(const struct nf_hook_ops *reg,
						  int pf)
{
	if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
	    (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
		return true;

	return false;
}

static inline bool __maybe_unused nf_egress_hook(const struct nf_hook_ops *reg,
						 int pf)
{
	return pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_EGRESS;
}
static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
#endif
}

static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
{
#ifdef CONFIG_JUMP_LABEL
	int hooknum;

	if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
		pf = NFPROTO_NETDEV;
		hooknum = NF_NETDEV_INGRESS;
	} else {
		hooknum = reg->hooknum;
	}
	static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
#endif
}

static int __nf_register_net_hook(struct net *net, int pf,
				  const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;
	int err;

	switch (pf) {
	case NFPROTO_NETDEV:
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
#ifndef CONFIG_NETFILTER_EGRESS
		if (reg->hooknum == NF_NETDEV_EGRESS)
			return -EOPNOTSUPP;
#endif
		if ((reg->hooknum != NF_NETDEV_INGRESS &&
		     reg->hooknum != NF_NETDEV_EGRESS) ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
		break;
	case NFPROTO_INET:
		if (reg->hooknum != NF_INET_INGRESS)
			break;

		err = nf_ingress_check(net, reg, NF_INET_INGRESS);
		if (err < 0)
			return err;
		break;
	}

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks)) {
		hooks_validate(new_hooks);
		rcu_assign_pointer(*pp, new_hooks);
	}

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_ingress_hook(reg, pf))
		net_inc_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
	if (nf_egress_hook(reg, pf))
		net_inc_egress_queue();
#endif
	nf_static_key_inc(reg, pf);

	BUG_ON(p == new_hooks);
	nf_hook_entries_free(p);
	return 0;
}

/*
 * nf_remove_net_hook - remove a hook from blob
 *
 * @oldp: current address of hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail, hook unregistration must always succeed.
 * Therefore replace the to-be-removed hook with a dummy hook.
 */
static bool nf_remove_net_hook(struct nf_hook_entries *old,
			       const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], (void *)&dummy_ops);
		return true;
	}

	return false;
}
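
/* Added note: the WRITE_ONCE() pairs above are needed because packet-path
 * readers may still be walking this blob under RCU while the hook is being
 * unregistered; the slot keeps working (as accept_all) until
 * __nf_hook_entries_try_shrink() publishes a smaller copy without the
 * dummy entries.
 */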

static void __nf_unregister_net_hook(struct net *net, int pf,
				     const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;

	pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (nf_ingress_hook(reg, pf))
			net_dec_ingress_queue();
#endif
#ifdef CONFIG_NETFILTER_EGRESS
		if (nf_egress_hook(reg, pf))
			net_dec_egress_queue();
#endif
		nf_static_key_dec(reg, pf);
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
	}

	p = __nf_hook_entries_try_shrink(p, pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	nf_queue_nf_hook_drop(net);
	nf_hook_entries_free(p);
}

void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			__nf_unregister_net_hook(net, NFPROTO_INET, reg);
		} else {
			__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
			__nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
		}
	} else {
		__nf_unregister_net_hook(net, reg->pf, reg);
	}
}
EXPORT_SYMBOL(nf_unregister_net_hook);

void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
				const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p;

	p = rcu_dereference_raw(*pp);
	if (nf_remove_net_hook(p, reg)) {
		p = __nf_hook_entries_try_shrink(p, pp);
		nf_hook_entries_free(p);
	}
}
EXPORT_SYMBOL_GPL(nf_hook_entries_delete_raw);

int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	int err;

	if (reg->pf == NFPROTO_INET) {
		if (reg->hooknum == NF_INET_INGRESS) {
			err = __nf_register_net_hook(net, NFPROTO_INET, reg);
			if (err < 0)
				return err;
		} else {
			err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
			if (err < 0)
				return err;

			err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
			if (err < 0) {
				__nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
				return err;
			}
		}
	} else {
		err = __nf_register_net_hook(net, reg->pf, reg);
		if (err < 0)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL(nf_register_net_hook);
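
/* Minimal usage sketch (illustrative only, not part of this file): a module
 * registering a hypothetical filter on IPv4 LOCAL_IN.  The names my_hookfn
 * and my_ops, and the chosen priority, are assumptions for the example.
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;	// let the packet continue
 *	}
 *
 *	static const struct nf_hook_ops my_ops = {
 *		.hook		= my_hookfn,
 *		.pf		= NFPROTO_IPV4,
 *		.hooknum	= NF_INET_LOCAL_IN,
 *		.priority	= NF_IP_PRI_FILTER,
 *	};
 *
 *	err = nf_register_net_hook(&init_net, &my_ops);
 *	...
 *	nf_unregister_net_hook(&init_net, &my_ops);
 *
 * With .pf == NFPROTO_INET the same ops would be attached to both the IPv4
 * and IPv6 chains, as implemented above.
 */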

int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_net_hooks(net, reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_net_hooks);

void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	unsigned int i;

	for (i = 0; i < hookcount; i++)
		nf_unregister_net_hook(net, &reg[i]);
}
EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise.  Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;
		case NF_DROP:
			kfree_skb_reason(skb,
					 SKB_DROP_REASON_NETFILTER_DROP);
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, s, verdict);
			if (ret == 1)
				continue;
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non conventional verdicts.
			 */
			return 0;
		}
	}

	return 1;
}
EXPORT_SYMBOL(nf_hook_slow);
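
/* Caller-side sketch of the contract documented above (illustrative; this is
 * roughly what the nf_hook()/NF_HOOK() wrappers in <linux/netfilter.h> do,
 * and the variable names are assumptions):
 *
 *	rcu_read_lock();
 *	ret = nf_hook_slow(skb, &state, hook_head, 0);
 *	rcu_read_unlock();
 *	if (ret == 1)
 *		ret = okfn(net, sk, skb);	// all hooks accepted
 *	// ret == 0: skb was stolen or queued, do not touch it
 *	// ret < 0:  skb was dropped and freed (-EPERM for plain NF_DROP)
 */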

void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
		       const struct nf_hook_entries *e)
{
	struct sk_buff *skb, *next;
	struct list_head sublist;
	int ret;

	INIT_LIST_HEAD(&sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		ret = nf_hook_slow(skb, state, e, 0);
		if (ret == 1)
			list_add_tail(&skb->list, &sublist);
	}
	/* Put passed packets back on main list */
	list_splice(&sublist, head);
}
EXPORT_SYMBOL(nf_hook_slow_list);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
u8 nf_ctnetlink_has_listener;
EXPORT_SYMBOL_GPL(nf_ctnetlink_has_listener);

const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);

/* This does not belong here, but locally generated errors need it if connection
 * tracking in use: without this, connection may not be in hash table, and hence
 * manufactured ICMP or RST packets will not be associated with it.
 */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;

	if (skb->_nfct) {
		rcu_read_lock();
		ct_hook = rcu_dereference(nf_ct_hook);
		if (ct_hook)
			ct_hook->attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	const struct nf_ct_hook *ct_hook;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ct_hook->destroy(nfct);
	rcu_read_unlock();

	WARN_ON(!ct_hook);
}
EXPORT_SYMBOL(nf_conntrack_destroy);

bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
			 const struct sk_buff *skb)
{
	const struct nf_ct_hook *ct_hook;
	bool ret = false;

	rcu_read_lock();
	ct_hook = rcu_dereference(nf_ct_hook);
	if (ct_hook)
		ret = ct_hook->get_tuple_skb(dst_tuple, skb);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_ct_get_tuple_skb);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

static void __net_init
__netfilter_net_init(struct nf_hook_entries __rcu **e, int max)
{
	int h;

	for (h = 0; h < max; h++)
		RCU_INIT_POINTER(e[h], NULL);
}

static int __net_init netfilter_net_init(struct net *net)
{
	__netfilter_net_init(net->nf.hooks_ipv4, ARRAY_SIZE(net->nf.hooks_ipv4));
	__netfilter_net_init(net->nf.hooks_ipv6, ARRAY_SIZE(net->nf.hooks_ipv6));
#ifdef CONFIG_NETFILTER_FAMILY_ARP
	__netfilter_net_init(net->nf.hooks_arp, ARRAY_SIZE(net->nf.hooks_arp));
#endif
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	__netfilter_net_init(get_nf_hooks_bridge(net), NF_INET_NUMHOOKS);
#endif
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry");

		return -ENOMEM;
	}
#endif

	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};

int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}
800