• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
4  */
5 
6 #include <linux/kernel.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/netlink.h>
10 #include <linux/netfilter.h>
11 #include <linux/netfilter/nf_tables.h>
12 #include <net/netfilter/nf_tables.h>
13 #include <net/netfilter/nft_reject.h>
14 #include <net/netfilter/ipv4/nf_reject.h>
15 #include <net/netfilter/ipv6/nf_reject.h>
16 #include <linux/ip.h>
17 #include <net/ip.h>
18 #include <net/ip6_checksum.h>
19 #include <linux/netfilter_bridge.h>
20 #include <linux/netfilter_ipv6.h>
21 #include "../br_private.h"
22 
nft_reject_br_push_etherhdr(struct sk_buff * oldskb,struct sk_buff * nskb)23 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
24 					struct sk_buff *nskb)
25 {
26 	struct ethhdr *eth;
27 
28 	eth = skb_push(nskb, ETH_HLEN);
29 	skb_reset_mac_header(nskb);
30 	ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
31 	ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
32 	eth->h_proto = eth_hdr(oldskb)->h_proto;
33 	skb_pull(nskb, ETH_HLEN);
34 }
35 
nft_bridge_iphdr_validate(struct sk_buff * skb)36 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
37 {
38 	struct iphdr *iph;
39 	u32 len;
40 
41 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
42 		return 0;
43 
44 	iph = ip_hdr(skb);
45 	if (iph->ihl < 5 || iph->version != 4)
46 		return 0;
47 
48 	len = ntohs(iph->tot_len);
49 	if (skb->len < len)
50 		return 0;
51 	else if (len < (iph->ihl*4))
52 		return 0;
53 
54 	if (!pskb_may_pull(skb, iph->ihl*4))
55 		return 0;
56 
57 	return 1;
58 }
59 
60 /* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
61  * or the bridge port (NF_BRIDGE PREROUTING).
62  */
nft_reject_br_send_v4_tcp_reset(struct net * net,struct sk_buff * oldskb,const struct net_device * dev,int hook)63 static void nft_reject_br_send_v4_tcp_reset(struct net *net,
64 					    struct sk_buff *oldskb,
65 					    const struct net_device *dev,
66 					    int hook)
67 {
68 	struct sk_buff *nskb;
69 	struct iphdr *niph;
70 	const struct tcphdr *oth;
71 	struct tcphdr _oth;
72 
73 	if (!nft_bridge_iphdr_validate(oldskb))
74 		return;
75 
76 	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
77 	if (!oth)
78 		return;
79 
80 	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
81 			 LL_MAX_HEADER, GFP_ATOMIC);
82 	if (!nskb)
83 		return;
84 
85 	skb_reserve(nskb, LL_MAX_HEADER);
86 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
87 				   net->ipv4.sysctl_ip_default_ttl);
88 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
89 	niph->tot_len = htons(nskb->len);
90 	ip_send_check(niph);
91 
92 	nft_reject_br_push_etherhdr(oldskb, nskb);
93 
94 	br_forward(br_port_get_rcu(dev), nskb, false, true);
95 }
96 
/* Build and forward an ICMPv4 destination-unreachable reply for a bridged
 * packet. Like the TCP reset path, the reply is injected back out through
 * the bridge port it arrived on.
 */
static void nft_reject_br_send_v4_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	struct icmphdr *icmph;
	unsigned int len;
	__wsum csum;
	u8 proto;

	if (!nft_bridge_iphdr_validate(oldskb))
		return;

	/* IP header checks: never reply to a non-first fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	len = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
		return;

	proto = ip_hdr(oldskb)->protocol;

	/* Do not reply to packets carrying a broken transport checksum. */
	if (!skb_csum_unnecessary(oldskb) &&
	    nf_reject_verify_csum(proto) &&
	    nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	/* READ_ONCE(): ip_default_ttl can be rewritten concurrently via
	 * sysctl; annotate the lockless read to avoid a data race.
	 */
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));

	skb_reset_transport_header(nskb);
	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
	icmph->type     = ICMP_DEST_UNREACH;
	icmph->code	= code;

	/* Embed the offending packet's headers in the ICMP payload. */
	skb_put_data(nskb, skb_network_header(oldskb), len);

	csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
	icmph->checksum = csum_fold(csum);

	niph->tot_len	= htons(nskb->len);
	ip_send_check(niph);

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
158 
nft_bridge_ip6hdr_validate(struct sk_buff * skb)159 static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
160 {
161 	struct ipv6hdr *hdr;
162 	u32 pkt_len;
163 
164 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
165 		return 0;
166 
167 	hdr = ipv6_hdr(skb);
168 	if (hdr->version != 6)
169 		return 0;
170 
171 	pkt_len = ntohs(hdr->payload_len);
172 	if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
173 		return 0;
174 
175 	return 1;
176 }
177 
nft_reject_br_send_v6_tcp_reset(struct net * net,struct sk_buff * oldskb,const struct net_device * dev,int hook)178 static void nft_reject_br_send_v6_tcp_reset(struct net *net,
179 					    struct sk_buff *oldskb,
180 					    const struct net_device *dev,
181 					    int hook)
182 {
183 	struct sk_buff *nskb;
184 	const struct tcphdr *oth;
185 	struct tcphdr _oth;
186 	unsigned int otcplen;
187 	struct ipv6hdr *nip6h;
188 
189 	if (!nft_bridge_ip6hdr_validate(oldskb))
190 		return;
191 
192 	oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
193 	if (!oth)
194 		return;
195 
196 	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
197 			 LL_MAX_HEADER, GFP_ATOMIC);
198 	if (!nskb)
199 		return;
200 
201 	skb_reserve(nskb, LL_MAX_HEADER);
202 	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
203 				     net->ipv6.devconf_all->hop_limit);
204 	nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
205 	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
206 
207 	nft_reject_br_push_etherhdr(oldskb, nskb);
208 
209 	br_forward(br_port_get_rcu(dev), nskb, false, true);
210 }
211 
reject6_br_csum_ok(struct sk_buff * skb,int hook)212 static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
213 {
214 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
215 	int thoff;
216 	__be16 fo;
217 	u8 proto = ip6h->nexthdr;
218 
219 	if (skb_csum_unnecessary(skb))
220 		return true;
221 
222 	if (ip6h->payload_len &&
223 	    pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
224 		return false;
225 
226 	ip6h = ipv6_hdr(skb);
227 	thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
228 	if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
229 		return false;
230 
231 	if (!nf_reject_verify_csum(proto))
232 		return true;
233 
234 	return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
235 }
236 
/* Build and forward an ICMPv6 destination-unreachable reply for a bridged
 * packet, sending it back out through the bridge port it arrived on.
 */
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  const struct net_device *dev,
					  int hook, u8 code)
{
	struct icmp6hdr *icmp6h;
	struct ipv6hdr *nip6h;
	struct sk_buff *nskb;
	unsigned int len;

	if (!nft_bridge_ip6hdr_validate(oldskb))
		return;

	/* Include "As much of invoking packet as possible without the ICMPv6
	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
	 */
	len = min_t(unsigned int, 1220, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* Never reply to a packet carrying a broken transport checksum. */
	if (!reject6_br_csum_ok(oldskb, hook))
		return;

	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	/* Embed the offending packet's headers in the ICMP payload. */
	skb_put_data(nskb, skb_network_header(oldskb), len);
	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));

	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_forward(br_port_get_rcu(dev), nskb, false, true);
}
290 
nft_reject_bridge_eval(const struct nft_expr * expr,struct nft_regs * regs,const struct nft_pktinfo * pkt)291 static void nft_reject_bridge_eval(const struct nft_expr *expr,
292 				   struct nft_regs *regs,
293 				   const struct nft_pktinfo *pkt)
294 {
295 	struct nft_reject *priv = nft_expr_priv(expr);
296 	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
297 
298 	if (is_broadcast_ether_addr(dest) ||
299 	    is_multicast_ether_addr(dest))
300 		goto out;
301 
302 	switch (eth_hdr(pkt->skb)->h_proto) {
303 	case htons(ETH_P_IP):
304 		switch (priv->type) {
305 		case NFT_REJECT_ICMP_UNREACH:
306 			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
307 						      nft_in(pkt),
308 						      nft_hook(pkt),
309 						      priv->icmp_code);
310 			break;
311 		case NFT_REJECT_TCP_RST:
312 			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
313 							nft_in(pkt),
314 							nft_hook(pkt));
315 			break;
316 		case NFT_REJECT_ICMPX_UNREACH:
317 			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
318 						      nft_in(pkt),
319 						      nft_hook(pkt),
320 						      nft_reject_icmp_code(priv->icmp_code));
321 			break;
322 		}
323 		break;
324 	case htons(ETH_P_IPV6):
325 		switch (priv->type) {
326 		case NFT_REJECT_ICMP_UNREACH:
327 			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
328 						      nft_in(pkt),
329 						      nft_hook(pkt),
330 						      priv->icmp_code);
331 			break;
332 		case NFT_REJECT_TCP_RST:
333 			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
334 							nft_in(pkt),
335 							nft_hook(pkt));
336 			break;
337 		case NFT_REJECT_ICMPX_UNREACH:
338 			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
339 						      nft_in(pkt),
340 						      nft_hook(pkt),
341 						      nft_reject_icmpv6_code(priv->icmp_code));
342 			break;
343 		}
344 		break;
345 	default:
346 		/* No explicit way to reject this protocol, drop it. */
347 		break;
348 	}
349 out:
350 	regs->verdict.code = NF_DROP;
351 }
352 
nft_reject_bridge_validate(const struct nft_ctx * ctx,const struct nft_expr * expr,const struct nft_data ** data)353 static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
354 				      const struct nft_expr *expr,
355 				      const struct nft_data **data)
356 {
357 	return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
358 						    (1 << NF_BR_LOCAL_IN));
359 }
360 
nft_reject_bridge_init(const struct nft_ctx * ctx,const struct nft_expr * expr,const struct nlattr * const tb[])361 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
362 				  const struct nft_expr *expr,
363 				  const struct nlattr * const tb[])
364 {
365 	struct nft_reject *priv = nft_expr_priv(expr);
366 	int icmp_code;
367 
368 	if (tb[NFTA_REJECT_TYPE] == NULL)
369 		return -EINVAL;
370 
371 	priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
372 	switch (priv->type) {
373 	case NFT_REJECT_ICMP_UNREACH:
374 	case NFT_REJECT_ICMPX_UNREACH:
375 		if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
376 			return -EINVAL;
377 
378 		icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
379 		if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
380 		    icmp_code > NFT_REJECT_ICMPX_MAX)
381 			return -EINVAL;
382 
383 		priv->icmp_code = icmp_code;
384 		break;
385 	case NFT_REJECT_TCP_RST:
386 		break;
387 	default:
388 		return -EINVAL;
389 	}
390 	return 0;
391 }
392 
nft_reject_bridge_dump(struct sk_buff * skb,const struct nft_expr * expr)393 static int nft_reject_bridge_dump(struct sk_buff *skb,
394 				  const struct nft_expr *expr)
395 {
396 	const struct nft_reject *priv = nft_expr_priv(expr);
397 
398 	if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
399 		goto nla_put_failure;
400 
401 	switch (priv->type) {
402 	case NFT_REJECT_ICMP_UNREACH:
403 	case NFT_REJECT_ICMPX_UNREACH:
404 		if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
405 			goto nla_put_failure;
406 		break;
407 	default:
408 		break;
409 	}
410 
411 	return 0;
412 
413 nla_put_failure:
414 	return -1;
415 }
416 
417 static struct nft_expr_type nft_reject_bridge_type;
418 static const struct nft_expr_ops nft_reject_bridge_ops = {
419 	.type		= &nft_reject_bridge_type,
420 	.size		= NFT_EXPR_SIZE(sizeof(struct nft_reject)),
421 	.eval		= nft_reject_bridge_eval,
422 	.init		= nft_reject_bridge_init,
423 	.dump		= nft_reject_bridge_dump,
424 	.validate	= nft_reject_bridge_validate,
425 };
426 
427 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
428 	.family		= NFPROTO_BRIDGE,
429 	.name		= "reject",
430 	.ops		= &nft_reject_bridge_ops,
431 	.policy		= nft_reject_policy,
432 	.maxattr	= NFTA_REJECT_MAX,
433 	.owner		= THIS_MODULE,
434 };
435 
nft_reject_bridge_module_init(void)436 static int __init nft_reject_bridge_module_init(void)
437 {
438 	return nft_register_expr(&nft_reject_bridge_type);
439 }
440 
nft_reject_bridge_module_exit(void)441 static void __exit nft_reject_bridge_module_exit(void)
442 {
443 	nft_unregister_expr(&nft_reject_bridge_type);
444 }
445 
446 module_init(nft_reject_bridge_module_init);
447 module_exit(nft_reject_bridge_module_exit);
448 
449 MODULE_LICENSE("GPL");
450 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
451 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
452