/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler can be registered per network namespace; the
 * queueing backend (nfnetlink_queue) is expected to be the sole caller. */
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(net->nf.queue_handler));
	rcu_assign_pointer(net->nf.queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
	RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
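
/*
 * Illustrative sketch (not part of this file): a queueing backend such as
 * nfnetlink_queue provides a struct nf_queue_handler and registers it per
 * network namespace.  The names example_outfn, example_hook_drop and
 * example_qh below are hypothetical; only the register/unregister calls
 * are the real API exported above.
 *
 *	static int example_outfn(struct nf_queue_entry *entry,
 *				 unsigned int queuenum)
 *	{
 *		// Hand the packet to the backend.  Every entry accepted
 *		// here must eventually come back via nf_reinject().
 *		return 0;
 *	}
 *
 *	static void example_hook_drop(struct net *net)
 *	{
 *		// Drop all entries still queued for this netns.
 *	}
 *
 *	static const struct nf_queue_handler example_qh = {
 *		.outfn		= example_outfn,
 *		.nf_hook_drop	= example_hook_drop,
 *	};
 *
 *	// in the backend's per-netns init ...
 *	nf_register_queue_handler(net, &example_qh);
 *	// ... and, after flushing its queues, in its per-netns exit
 *	nf_unregister_queue_handler(net);
 */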

static void nf_queue_sock_put(struct sock *sk)
{
#ifdef CONFIG_INET
	/* sock_gen_put() also copes with request and timewait sockets */
	sock_gen_put(sk);
#else
	sock_put(sk);
#endif
}

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		nf_queue_sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->physin)
		dev_put(entry->physin);
	if (entry->physout)
		dev_put(entry->physout);
#endif
}

void nf_queue_entry_free(struct nf_queue_entry *entry)
{
	nf_queue_entry_release_refs(entry);
	kfree(entry);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_free);

static void __nf_queue_entry_init_physdevs(struct nf_queue_entry *entry)
{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	const struct sk_buff *skb = entry->skb;
	struct nf_bridge_info *nf_bridge;

	nf_bridge = nf_bridge_info_get(skb);
	if (nf_bridge) {
		entry->physin = nf_bridge_get_physindev(skb);
		entry->physout = nf_bridge_get_physoutdev(skb);
	} else {
		entry->physin = NULL;
		entry->physout = NULL;
	}
#endif
}

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (state->sk && !refcount_inc_not_zero(&state->sk->sk_refcnt))
		return false;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->physin)
		dev_hold(entry->physin);
	if (entry->physout)
		dev_hold(entry->physout);
#endif
	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
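
/*
 * Illustrative sketch (not part of this file): nf_queue_entry_get_refs() is
 * exported so that a queueing backend which clones a queue entry can take
 * its own references on the devices and the socket.  The helper name below
 * is hypothetical.
 *
 *	static struct nf_queue_entry *example_entry_dup(struct nf_queue_entry *e)
 *	{
 *		struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
 *
 *		if (!entry)
 *			return NULL;
 *
 *		if (nf_queue_entry_get_refs(entry))
 *			return entry;
 *
 *		// The socket is already going away: drop the copy.
 *		kfree(entry);
 *		return NULL;
 *	}
 */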

/* Ask the registered queue handler to drop everything it has queued
 * for this network namespace.
 */
void nf_queue_nf_hook_drop(struct net *net)
{
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(net->nf.queue_handler);
	if (qh)
		qh->nf_hook_drop(net);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop);

static void nf_ip_saveroute(const struct sk_buff *skb,
			    struct nf_queue_entry *entry)
{
	struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct iphdr *iph = ip_hdr(skb);

		rt_info->tos = iph->tos;
		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->state.hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
		      unsigned int index, unsigned int queuenum)
{
	struct nf_queue_entry *entry = NULL;
	const struct nf_queue_handler *qh;
	struct net *net = state->net;
	unsigned int route_key_size;
	int status;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	qh = rcu_dereference(net->nf.queue_handler);
	if (!qh)
		return -ESRCH;

	switch (state->pf) {
	case AF_INET:
		route_key_size = sizeof(struct ip_rt_info);
		break;
	case AF_INET6:
		route_key_size = sizeof(struct ip6_rt_info);
		break;
	default:
		route_key_size = 0;
		break;
	}

	if (skb_sk_is_prefetched(skb)) {
		struct sock *sk = skb->sk;

		if (!sk_is_refcounted(sk)) {
			if (!refcount_inc_not_zero(&sk->sk_refcnt))
				return -ENOTCONN;

			/* drop refcount on skb_orphan */
			skb->destructor = sock_edemux;
		}
	}

	entry = kmalloc(sizeof(*entry) + route_key_size, GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	if (skb_dst(skb) && !skb_dst_force(skb)) {
		kfree(entry);
		return -ENETDOWN;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.state	= *state,
		.hook_index = index,
		.size	= sizeof(*entry) + route_key_size,
	};

	__nf_queue_entry_init_physdevs(entry);

	if (!nf_queue_entry_get_refs(entry)) {
		kfree(entry);
		return -ENOTCONN;
	}

	switch (entry->state.pf) {
	case AF_INET:
		nf_ip_saveroute(skb, entry);
		break;
	case AF_INET6:
		nf_ip6_saveroute(skb, entry);
		break;
	}

	status = qh->outfn(entry, queuenum);
	if (status < 0) {
		nf_queue_entry_free(entry);
		return status;
	}

	return 0;
}

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
	     unsigned int index, unsigned int verdict)
{
	int ret;

	ret = __nf_queue(skb, state, index, verdict >> NF_VERDICT_QBITS);
	if (ret < 0) {
		if (ret == -ESRCH &&
		    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
			return 1;
		kfree_skb(skb);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nf_queue);
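
/*
 * Illustrative sketch (not part of this file): the target queue number and
 * the bypass flag travel in the upper bits of the hook verdict, which is why
 * nf_queue() extracts the queue with "verdict >> NF_VERDICT_QBITS" above.
 * A hypothetical hook function asking for queue 3, and accepting the packet
 * instead of dropping it when nobody is listening, could return:
 *
 *	static unsigned int example_hookfn(void *priv, struct sk_buff *skb,
 *					   const struct nf_hook_state *state)
 *	{
 *		return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
 *	}
 */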

static unsigned int nf_iterate(struct sk_buff *skb,
			       struct nf_hook_state *state,
			       const struct nf_hook_entries *hooks,
			       unsigned int *index)
{
	const struct nf_hook_entry *hook;
	unsigned int verdict, i = *index;

	while (i < hooks->num_hook_entries) {
		hook = &hooks->hooks[i];
repeat:
		verdict = nf_hook_entry_hookfn(hook, skb, state);
		if (verdict != NF_ACCEPT) {
			*index = i;
			if (verdict != NF_REPEAT)
				return verdict;
			/* NF_REPEAT: run the same hook entry again */
			goto repeat;
		}
		i++;
	}

	*index = i;
	return NF_ACCEPT;
}

static struct nf_hook_entries *nf_hook_entries_head(const struct net *net, u8 pf, u8 hooknum)
{
	switch (pf) {
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	case NFPROTO_BRIDGE:
		return rcu_dereference(net->nf.hooks_bridge[hooknum]);
#endif
	case NFPROTO_IPV4:
		return rcu_dereference(net->nf.hooks_ipv4[hooknum]);
	case NFPROTO_IPV6:
		return rcu_dereference(net->nf.hooks_ipv6[hooknum]);
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}

	return NULL;
}

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	const struct nf_hook_entry *hook_entry;
	const struct nf_hook_entries *hooks;
	struct sk_buff *skb = entry->skb;
	const struct net *net;
	unsigned int i;
	int err;
	u8 pf;

	net = entry->state.net;
	pf = entry->state.pf;

	hooks = nf_hook_entries_head(net, pf, entry->state.hook);

	i = entry->hook_index;
	if (WARN_ON_ONCE(!hooks || i >= hooks->num_hook_entries)) {
		kfree_skb(skb);
		nf_queue_entry_free(entry);
		return;
	}

	hook_entry = &hooks->hooks[i];

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT)
		verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

	if (verdict == NF_ACCEPT) {
		if (nf_reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
next_hook:
		++i;
		verdict = nf_iterate(skb, &entry->state, hooks, &i);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.net, entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, &entry->state, i, verdict);
		if (err == 1)
			goto next_hook;
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}

	nf_queue_entry_free(entry);
}
EXPORT_SYMBOL(nf_reinject);
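
/*
 * Illustrative sketch (not part of this file): once userspace has decided
 * the fate of a queued packet, the backend hands the verdict back to the
 * stack with nf_reinject() under the RCU read lock.  The helper name below
 * is hypothetical.
 *
 *	static void example_issue_verdict(struct nf_queue_entry *entry,
 *					  unsigned int verdict)
 *	{
 *		rcu_read_lock();
 *		nf_reinject(entry, verdict);	// e.g. NF_ACCEPT or NF_DROP
 *		rcu_read_unlock();
 *	}
 */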