/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

static DEFINE_MUTEX(afinfo_mutex);

const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(nf_register_afinfo);

void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
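
/*
 * Illustrative sketch (not part of this file): each address family
 * registers its own nf_afinfo at init time, roughly as the IPv4 code in
 * net/ipv4/netfilter.c does. Fields beyond .family are elided here
 * rather than guessed at.
 *
 *	static const struct nf_afinfo nf_ip_afinfo = {
 *		.family	= AF_INET,
 *		...
 *	};
 *
 *	return nf_register_afinfo(&nf_ip_afinfo);
 */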

struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
EXPORT_SYMBOL(nf_hooks);

#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
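
/*
 * Sketch of why the static keys exist (assumed to mirror the fast path
 * in <linux/netfilter.h>): callers can skip the hook traversal entirely
 * when nothing is registered, via a patched-in branch whenever pf and
 * hooknum are compile-time constants.
 *
 *	static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 *	{
 *	#ifdef HAVE_JUMP_LABEL
 *		if (__builtin_constant_p(pf) && __builtin_constant_p(hook))
 *			return static_key_false(&nf_hooks_needed[pf][hook]);
 *	#endif
 *		return !list_empty(&nf_hooks[pf][hook]);
 *	}
 */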

static DEFINE_MUTEX(nf_hook_mutex);

int nf_register_hook(struct nf_hook_ops *reg)
{
	struct nf_hook_ops *elem;

	mutex_lock(&nf_hook_mutex);
	/* Keep each chain sorted by ascending priority: insert before the
	 * first entry whose priority is higher than ours. */
	list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) {
		if (reg->priority < elem->priority)
			break;
	}
	list_add_rcu(&reg->list, elem->list.prev);
	mutex_unlock(&nf_hook_mutex);
#ifdef HAVE_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	return 0;
}
EXPORT_SYMBOL(nf_register_hook);

void nf_unregister_hook(struct nf_hook_ops *reg)
{
	mutex_lock(&nf_hook_mutex);
	list_del_rcu(&reg->list);
	mutex_unlock(&nf_hook_mutex);
#ifdef HAVE_JUMP_LABEL
	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	/* Wait for any nf_hook_slow() walkers still seeing the old list,
	 * then drop queued packets that reference the removed hook. */
	synchronize_net();
	nf_queue_nf_hook_drop(reg);
}
EXPORT_SYMBOL(nf_unregister_hook);

int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = nf_register_hook(&reg[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		nf_unregister_hooks(reg, i);
	return err;
}
EXPORT_SYMBOL(nf_register_hooks);

void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n)
{
	while (n-- > 0)
		nf_unregister_hook(&reg[n]);
}
EXPORT_SYMBOL(nf_unregister_hooks);
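
/*
 * Minimal usage sketch (hypothetical module, not part of this file),
 * assuming the nf_hookfn signature that nf_iterate() below calls with:
 *
 *	static unsigned int my_hook(const struct nf_hook_ops *ops,
 *				    struct sk_buff *skb,
 *				    const struct net_device *in,
 *				    const struct net_device *out,
 *				    int (*okfn)(struct sk_buff *))
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static struct nf_hook_ops my_ops[] = {
 *		{
 *			.hook		= my_hook,
 *			.owner		= THIS_MODULE,
 *			.pf		= NFPROTO_IPV4,
 *			.hooknum	= NF_INET_PRE_ROUTING,
 *			.priority	= NF_IP_PRI_FILTER,
 *		},
 *	};
 *
 *	err = nf_register_hooks(my_ops, ARRAY_SIZE(my_ops));
 *
 * nf_register_hooks() unwinds the already-registered subset on failure,
 * so the caller only ever sees all-or-nothing registration.
 */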

unsigned int nf_iterate(struct list_head *head,
			struct sk_buff *skb,
			unsigned int hook,
			const struct net_device *indev,
			const struct net_device *outdev,
			struct nf_hook_ops **elemp,
			int (*okfn)(struct sk_buff *),
			int hook_thresh)
{
	unsigned int verdict;

	/*
	 * The caller must not block between calls to this
	 * function because of risk of continuing from deleted element.
	 */
	list_for_each_entry_continue_rcu((*elemp), head, list) {
		if (hook_thresh > (*elemp)->priority)
			continue;

		/* Optimization: we don't need to hold module
		   reference here, since function can't sleep. --RR */
repeat:
		verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn);
		if (verdict != NF_ACCEPT) {
#ifdef CONFIG_NETFILTER_DEBUG
			if (unlikely((verdict & NF_VERDICT_MASK)
							> NF_MAX_VERDICT)) {
				NFDEBUG("Evil return from %p(%u).\n",
					(*elemp)->hook, hook);
				continue;
			}
#endif
			if (verdict != NF_REPEAT)
				return verdict;
			goto repeat;
		}
	}
	return NF_ACCEPT;
}


/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. */
int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb,
		 struct net_device *indev,
		 struct net_device *outdev,
		 int (*okfn)(struct sk_buff *),
		 int hook_thresh)
{
	struct nf_hook_ops *elem;
	unsigned int verdict;
	int ret = 0;

	/* We may already have this, but read-locks nest anyway */
	rcu_read_lock();

	elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list);
next_hook:
	verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev,
			     outdev, &elem, okfn, hook_thresh);
	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
		ret = 1;
	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
		kfree_skb(skb);
		ret = NF_DROP_GETERR(verdict);
		if (ret == 0)
			ret = -EPERM;
	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
		int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				   verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(nf_hook_slow);
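
/*
 * Callers normally reach nf_hook_slow() through the NF_HOOK() wrapper in
 * <linux/netfilter.h>. A typical invocation, sketched from the IPv4
 * output path of this era (treat the exact arguments as illustrative):
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
 *		       skb_dst(skb)->dev, dst_output);
 *
 * Per the comment above: a return of 1 means the wrapper invokes okfn()
 * itself, 0 means the packet was stolen or queued, and a negative value
 * (e.g. -EPERM for NF_DROP) is propagated as an error.
 */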


int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	if (writable_len > skb->len)
		return 0;

	/* Not exclusive use of packet?  Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= skb_headlen(skb))
			return 1;
	} else if (skb_clone_writable(skb, writable_len))
		return 1;

	if (writable_len <= skb_headlen(skb))
		writable_len = 0;
	else
		writable_len -= skb_headlen(skb);

	return !!__pskb_pull_tail(skb, writable_len);
}
EXPORT_SYMBOL(skb_make_writable);
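
/*
 * Usage sketch (hypothetical packet-mangling module): call
 * skb_make_writable() before modifying headers, so clones and nonlinear
 * heads are privatized first:
 *
 *	if (!skb_make_writable(skb, skb_transport_offset(skb) +
 *				    sizeof(struct tcphdr)))
 *		return NF_DROP;
 *
 * It returns 1 on success and 0 on failure, so callers must be prepared
 * to drop the packet.
 */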

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be
   in the hash table, and hence manufactured ICMP or RST packets will not
   be associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
		__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);

void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (skb->nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
			attach(new, skb);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(nf_ct_attach);
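
/*
 * Usage sketch: code that fabricates a reply from an existing packet
 * (e.g. a TCP RST generated by the REJECT target) attaches the
 * original's conntrack entry to the new skb before sending it:
 *
 *	nf_ct_attach(nskb, oldskb);
 *
 * where nskb is the manufactured packet and oldskb the packet that
 * triggered it (names here are illustrative).
 */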

void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);

void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(destroy == NULL);
	destroy(nfct);
	rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
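
/*
 * Sketch of the expected caller (assumed to mirror nf_conntrack_put()
 * in <linux/skbuff.h>): the destroy hook fires only when the last
 * reference to a conntrack entry is dropped.
 *
 *	static inline void nf_conntrack_put(struct nf_conntrack *nfct)
 *	{
 *		if (nfct && atomic_dec_and_test(&nfct->use))
 *			nf_conntrack_destroy(nfct);
 *	}
 */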

struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_hook);

struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);

#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif

static int __net_init netfilter_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry\n");

		return -ENOMEM;
	}
#endif
	return 0;
}

static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};
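
/*
 * netfilter_init() below runs once during early networking bring-up
 * (from sock_init() when CONFIG_NETFILTER is enabled, in kernels of
 * this era). It seeds every per-protocol hook list as empty, then wires
 * up the per-namespace /proc directory and the logging core.
 */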

int __init netfilter_init(void)
{
	int i, h, ret;

	for (i = 0; i < ARRAY_SIZE(nf_hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			INIT_LIST_HEAD(&nf_hooks[i][h]);
	}

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		goto err;

	ret = netfilter_log_init();
	if (ret < 0)
		goto err_pernet;

	return 0;
err_pernet:
	unregister_pernet_subsys(&netfilter_net_ops);
err:
	return ret;
}