/*
 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/ipv6/nf_reject.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv6.h>
#include "../br_private.h"

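/* Build the Ethernet header of the reject packet by mirroring the original
 * frame: source and destination MAC addresses are swapped and the EtherType
 * is copied over. The header is pushed and then pulled again so that
 * nskb->data keeps pointing at the network header.
 */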
static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
					struct sk_buff *nskb)
{
	struct ethhdr *eth;

	eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
	skb_reset_mac_header(nskb);
	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
	eth->h_proto = eth_hdr(oldskb)->h_proto;
	skb_pull(nskb, ETH_HLEN);
}

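/* Validate the IPv4 header of the original packet: it must be pullable,
 * version 4, and carry a sane header length and total length.
 * Returns 1 if the header looks usable, 0 otherwise.
 */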
static int nft_bridge_iphdr_validate(struct sk_buff *skb)
{
	struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return 0;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return 0;

	len = ntohs(iph->tot_len);
	if (skb->len < len)
		return 0;
	else if (len < (iph->ihl*4))
		return 0;

	if (!pskb_may_pull(skb, iph->ihl*4))
		return 0;

	return 1;
}

/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
 * or the bridge port (NF_BRIDGE PREROUTING).
 */
static void nft_reject_br_send_v4_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   net->ipv4.sysctl_ip_default_ttl);
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
	niph->ttl	= net->ipv4.sysctl_ip_default_ttl;
	niph->tot_len	= htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}

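/* Send an ICMP destination unreachable message with the given code in reply
 * to the original packet. Fragments and packets with a bad layer 4 checksum
 * are never replied to.
 */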
static void nft_reject_br_send_v4_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	void *payload;
	__wsum csum;
	u8 proto;

	if (oldskb->csum_bad || !nft_bridge_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return;

	if (ip_hdr(oldskb)->protocol == IPPROTO_TCP ||
	    ip_hdr(oldskb)->protocol == IPPROTO_UDP)
		proto = ip_hdr(oldskb)->protocol;
	else
		proto = 0;

	if (!skb_csum_unnecessary(oldskb) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   net->ipv4.sysctl_ip_default_ttl);

	skb_reset_transport_header(nskb);
	icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
	memset(icmph, 0, sizeof(*icmph));
	icmph->type     = ICMP_DEST_UNREACH;
	icmph->code	= code;

	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len	= htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}

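/* Validate the IPv6 header of the original packet: it must be pullable,
 * version 6, and its payload length must fit within the skb.
 */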
static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
{
	struct ipv6hdr *hdr;
	u32 pkt_len;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return 0;

	hdr = ipv6_hdr(skb);
	if (hdr->version != 6)
		return 0;

	pkt_len = ntohs(hdr->payload_len);
	if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
		return 0;

	return 1;
}

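/* Send a TCP RST in reply to an IPv6 TCP segment seen on the bridge. */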
static void nft_reject_br_send_v6_tcp_reset(struct net *net,
					    struct sk_buff *oldskb,
					    const struct net_device *dev,
					    int hook)
{
	struct sk_buff *nskb;
	const struct tcphdr *oth;
	struct tcphdr _oth;
	unsigned int otcplen;
	struct ipv6hdr *nip6h;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
	if (!oth)
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				     net->ipv6.devconf_all->hop_limit);
	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}

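/* Verify the transport checksum of the original IPv6 packet, skipping
 * extension headers. Returns false for non-first fragments and for packets
 * whose checksum cannot be validated.
 */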
static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	int thoff;
	__be16 fo;
	u8 proto = ip6h->nexthdr;

	if (skb->csum_bad)
		return false;

	if (skb_csum_unnecessary(skb))
		return true;

	if (ip6h->payload_len &&
	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
		return false;

	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
		return false;

	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
}

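/* Send an ICMPv6 destination unreachable message with the given code in
 * reply to the original packet.
 */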
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct ipv6hdr *nip6h;
	struct icmp6hdr *icmp6h;
	unsigned int len;
	void *payload;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* Include "As much of invoking packet as possible without the ICMPv6
	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
	 */
	len = min_t(unsigned int, 1220, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	if (!reject6_br_csum_ok(oldskb, hook))
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	memset(icmp6h, 0, sizeof(*icmp6h));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}

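/* Emit the configured reject packet (ICMP/ICMPv6 unreachable or TCP RST)
 * for IPv4 and IPv6 traffic, then unconditionally drop the original packet.
 * Broadcast and multicast frames are never replied to.
 */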
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
						      pkt->in, pkt->hook,
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(pkt->net, pkt->skb,
							pkt->in, pkt->hook);
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nft_reject_br_send_v4_unreach(pkt->net, pkt->skb,
						      pkt->in, pkt->hook,
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
						      pkt->in, pkt->hook,
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(pkt->net, pkt->skb,
							pkt->in, pkt->hook);
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nft_reject_br_send_v6_unreach(pkt->net, pkt->skb,
						      pkt->in, pkt->hook,
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No explicit way to reject this protocol, drop it. */
		break;
	}
out:
	regs->verdict.code = NF_DROP;
}

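/* The reject expression is only usable from the bridge prerouting and
 * input hooks.
 */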
static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
				      const struct nft_expr *expr,
				      const struct nft_data **data)
{
	return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
						    (1 << NF_BR_LOCAL_IN));
}

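/* Parse the NFTA_REJECT_TYPE and NFTA_REJECT_ICMP_CODE netlink attributes
 * into the expression's private data and validate the configuration.
 */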
static int nft_reject_bridge_init(const struct nft_ctx *ctx,
				  const struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	struct nft_reject *priv = nft_expr_priv(expr);
	int icmp_code, err;

	err = nft_reject_bridge_validate(ctx, expr, NULL);
	if (err < 0)
		return err;

	if (tb[NFTA_REJECT_TYPE] == NULL)
		return -EINVAL;

	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
	switch (priv->type) {
	case NFT_REJECT_ICMP_UNREACH:
	case NFT_REJECT_ICMPX_UNREACH:
		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
			return -EINVAL;

		icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
		if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
		    icmp_code > NFT_REJECT_ICMPX_MAX)
			return -EINVAL;

		priv->icmp_code = icmp_code;
		break;
	case NFT_REJECT_TCP_RST:
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

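/* Dump the reject type and, where applicable, the ICMP code to userspace. */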
static int nft_reject_bridge_dump(struct sk_buff *skb,
				  const struct nft_expr *expr)
{
	const struct nft_reject *priv = nft_expr_priv(expr);

	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
		goto nla_put_failure;

	switch (priv->type) {
	case NFT_REJECT_ICMP_UNREACH:
	case NFT_REJECT_ICMPX_UNREACH:
		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
			goto nla_put_failure;
		break;
	default:
		break;
	}

	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_reject_bridge_type;
static const struct nft_expr_ops nft_reject_bridge_ops = {
	.type		= &nft_reject_bridge_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
	.eval		= nft_reject_bridge_eval,
	.init		= nft_reject_bridge_init,
	.dump		= nft_reject_bridge_dump,
	.validate	= nft_reject_bridge_validate,
};

static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
	.family		= NFPROTO_BRIDGE,
	.name		= "reject",
	.ops		= &nft_reject_bridge_ops,
	.policy		= nft_reject_policy,
	.maxattr	= NFTA_REJECT_MAX,
	.owner		= THIS_MODULE,
};

static int __init nft_reject_bridge_module_init(void)
{
	return nft_register_expr(&nft_reject_bridge_type);
}

static void __exit nft_reject_bridge_module_exit(void)
{
	nft_unregister_expr(&nft_reject_bridge_type);
}

module_init(nft_reject_bridge_module_init);
module_exit(nft_reject_bridge_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");