1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Handle firewalling
4 * Linux ethernet bridge
5 *
6 * Authors:
7 * Lennert Buytenhek <buytenh@gnu.org>
8 * Bart De Schuymer <bdschuym@pandora.be>
9 *
10 * Lennert dedicates this file to Kerstin Wurdinger.
11 */
12
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/ip.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/if_arp.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_vlan.h>
22 #include <linux/if_pppox.h>
23 #include <linux/ppp_defs.h>
24 #include <linux/netfilter_bridge.h>
25 #include <uapi/linux/netfilter_bridge.h>
26 #include <linux/netfilter_ipv4.h>
27 #include <linux/netfilter_ipv6.h>
28 #include <linux/netfilter_arp.h>
29 #include <linux/in_route.h>
30 #include <linux/rculist.h>
31 #include <linux/inetdevice.h>
32
33 #include <net/ip.h>
34 #include <net/ipv6.h>
35 #include <net/addrconf.h>
36 #include <net/dst_metadata.h>
37 #include <net/route.h>
38 #include <net/netfilter/br_netfilter.h>
39 #include <net/netns/generic.h>
40 #include <net/inet_dscp.h>
41
42 #include <linux/uaccess.h>
43 #include "br_private.h"
44 #ifdef CONFIG_SYSCTL
45 #include <linux/sysctl.h>
46 #endif
47
48 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
49 #include <net/netfilter/nf_conntrack_core.h>
50 #endif
51
/* Per-netns generic-netlink id used with net_generic() to look up the
 * struct brnf_net instance for a given struct net.
 */
static unsigned int brnf_net_id __read_mostly;

/* Per-network-namespace configuration of the bridge netfilter glue. */
struct brnf_net {
	bool enabled;		/* hooks registered in this netns */

#ifdef CONFIG_SYSCTL
	struct ctl_table_header *ctl_hdr;	/* sysctl table handle */
#endif

	/* default value is 1 */
	int call_iptables;	/* pass bridged IPv4 through iptables */
	int call_ip6tables;	/* pass bridged IPv6 through ip6tables */
	int call_arptables;	/* pass bridged ARP through arptables */

	/* default value is 0 */
	int filter_vlan_tagged;		/* also filter VLAN-encapsulated frames */
	int filter_pppoe_tagged;	/* also filter PPPoE session frames */
	int pass_vlan_indev;		/* report VLAN-on-bridge as indev */
};
71
/* Frame-classification helpers: true only for untagged frames (no
 * hw-accel VLAN tag) whose Ethernet protocol matches.  VLAN- and
 * PPPoE-encapsulated variants are handled by the is_vlan_*/is_pppoe_*
 * helpers below.
 */
#define IS_IP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!skb_vlan_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
80
vlan_proto(const struct sk_buff * skb)81 static inline __be16 vlan_proto(const struct sk_buff *skb)
82 {
83 if (skb_vlan_tag_present(skb))
84 return skb->protocol;
85 else if (skb->protocol == htons(ETH_P_8021Q))
86 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
87 else
88 return 0;
89 }
90
is_vlan_ip(const struct sk_buff * skb,const struct net * net)91 static inline bool is_vlan_ip(const struct sk_buff *skb, const struct net *net)
92 {
93 struct brnf_net *brnet = net_generic(net, brnf_net_id);
94
95 return vlan_proto(skb) == htons(ETH_P_IP) && brnet->filter_vlan_tagged;
96 }
97
is_vlan_ipv6(const struct sk_buff * skb,const struct net * net)98 static inline bool is_vlan_ipv6(const struct sk_buff *skb,
99 const struct net *net)
100 {
101 struct brnf_net *brnet = net_generic(net, brnf_net_id);
102
103 return vlan_proto(skb) == htons(ETH_P_IPV6) &&
104 brnet->filter_vlan_tagged;
105 }
106
is_vlan_arp(const struct sk_buff * skb,const struct net * net)107 static inline bool is_vlan_arp(const struct sk_buff *skb, const struct net *net)
108 {
109 struct brnf_net *brnet = net_generic(net, brnf_net_id);
110
111 return vlan_proto(skb) == htons(ETH_P_ARP) && brnet->filter_vlan_tagged;
112 }
113
pppoe_proto(const struct sk_buff * skb)114 static inline __be16 pppoe_proto(const struct sk_buff *skb)
115 {
116 return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
117 sizeof(struct pppoe_hdr)));
118 }
119
is_pppoe_ip(const struct sk_buff * skb,const struct net * net)120 static inline bool is_pppoe_ip(const struct sk_buff *skb, const struct net *net)
121 {
122 struct brnf_net *brnet = net_generic(net, brnf_net_id);
123
124 return skb->protocol == htons(ETH_P_PPP_SES) &&
125 pppoe_proto(skb) == htons(PPP_IP) && brnet->filter_pppoe_tagged;
126 }
127
is_pppoe_ipv6(const struct sk_buff * skb,const struct net * net)128 static inline bool is_pppoe_ipv6(const struct sk_buff *skb,
129 const struct net *net)
130 {
131 struct brnf_net *brnet = net_generic(net, brnf_net_id);
132
133 return skb->protocol == htons(ETH_P_PPP_SES) &&
134 pppoe_proto(skb) == htons(PPP_IPV6) &&
135 brnet->filter_pppoe_tagged;
136 }
137
/* largest possible L2 header, see br_nf_dev_queue_xmit() */
#define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN)

/* Per-CPU scratch area used to carry the L2 header (and any VLAN tag)
 * across IP fragmentation: br_nf_dev_queue_xmit() saves the header here
 * and br_nf_push_frag_xmit() restores it on each emitted fragment.
 * bh_lock serializes access to this per-CPU buffer between the save and
 * the restore.
 */
struct brnf_frag_data {
	local_lock_t bh_lock;
	char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH];	/* saved MAC + encap header */
	u8 encap_size;		/* VLAN/PPPoE encap length (0 for plain) */
	u8 size;		/* total saved header: ETH_HLEN + encap_size */
	u16 vlan_tci;		/* hw-accel VLAN tag, valid if vlan_proto != 0 */
	__be16 vlan_proto;	/* 0 when no hw-accel VLAN tag was present */
};

static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};
153
/* Drop the bridge-netfilter skb extension, if present. */
static void nf_bridge_info_free(struct sk_buff *skb)
{
	skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
}
158
bridge_parent(const struct net_device * dev)159 static inline struct net_device *bridge_parent(const struct net_device *dev)
160 {
161 struct net_bridge_port *port;
162
163 port = br_port_get_rcu(dev);
164 return port ? port->br->dev : NULL;
165 }
166
/* Obtain an exclusive SKB_EXT_BRIDGE_NF extension for this skb via
 * skb_ext_add().  Returns NULL on allocation failure.
 */
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
}
171
nf_bridge_encap_header_len(const struct sk_buff * skb)172 unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
173 {
174 switch (skb->protocol) {
175 case __cpu_to_be16(ETH_P_8021Q):
176 return VLAN_HLEN;
177 case __cpu_to_be16(ETH_P_PPP_SES):
178 return PPPOE_SES_HLEN;
179 default:
180 return 0;
181 }
182 }
183
nf_bridge_pull_encap_header(struct sk_buff * skb)184 static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
185 {
186 unsigned int len = nf_bridge_encap_header_len(skb);
187
188 skb_pull(skb, len);
189 skb->network_header += len;
190 }
191
nf_bridge_pull_encap_header_rcsum(struct sk_buff * skb)192 static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
193 {
194 unsigned int len = nf_bridge_encap_header_len(skb);
195
196 skb_pull_rcsum(skb, len);
197 skb->network_header += len;
198 }
199
200 /* When handing a packet over to the IP layer
201 * check whether we have a skb that is in the
202 * expected format
203 */
204
/* Sanity-check a bridged frame before handing it to the IPv4 netfilter
 * hooks: header length/version, header checksum, and total-length
 * consistency.  Trims trailing padding beyond the IP total length and
 * clears IPCB.  Returns 0 if the packet looks valid, -1 (after bumping
 * the appropriate SNMP counter) if it must be dropped.
 */
static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph;
	u32 len;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	/* pskb_may_pull() may have reallocated the head: re-read the header */
	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = skb_ip_totlen(skb);
	if (skb->len < len) {
		/* frame shorter than the IP total length claims */
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Drop trailing Ethernet padding so skb->len matches the IP length */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	/* We should really parse IP options here but until
	 * somebody who actually uses IP options complains to
	 * us we'll just silently ignore the options because
	 * we're lazy!
	 */
	return 0;

csum_error:
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}
253
nf_bridge_update_protocol(struct sk_buff * skb)254 void nf_bridge_update_protocol(struct sk_buff *skb)
255 {
256 const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
257
258 switch (nf_bridge->orig_proto) {
259 case BRNF_PROTO_8021Q:
260 skb->protocol = htons(ETH_P_8021Q);
261 break;
262 case BRNF_PROTO_PPPOE:
263 skb->protocol = htons(ETH_P_PPP_SES);
264 break;
265 case BRNF_PROTO_UNCHANGED:
266 break;
267 }
268 }
269
270 /* Obtain the correct destination MAC address, while preserving the original
271 * source MAC address. If we already know this address, we just copy it. If we
272 * don't, we use the neighbour framework to find out. In both cases, we make
273 * sure that br_handle_frame_finish() is called afterwards.
274 */
int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct neighbour *neigh;
	struct dst_entry *dst;

	/* Transmit happens via the bridge master; bail if the port left
	 * the bridge meanwhile.
	 */
	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_neigh_lookup_skb(dst, skb);
	if (neigh) {
		struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
		int ret;

		if ((READ_ONCE(neigh->nud_state) & NUD_CONNECTED) &&
		    READ_ONCE(neigh->hh.hh_len)) {
			/* Fast path: cached hardware header is valid, fill in
			 * the destination MAC directly and continue bridging
			 * on the original physical in-port.
			 */
			struct net_device *br_indev;

			br_indev = nf_bridge_get_physindev(skb, net);
			if (!br_indev) {
				neigh_release(neigh);
				goto free_skb;
			}

			neigh_hh_bridge(&neigh->hh, skb);
			skb->dev = br_indev;

			ret = br_handle_frame_finish(net, sk, skb);
		} else {
			/* the neighbour function below overwrites the complete
			 * MAC header, so we save the Ethernet source address and
			 * protocol number.
			 */
			skb_copy_from_linear_data_offset(skb,
							 -(ETH_HLEN-ETH_ALEN),
							 nf_bridge->neigh_header,
							 ETH_HLEN-ETH_ALEN);
			/* tell br_dev_xmit to continue with forwarding */
			nf_bridge->bridged_dnat = 1;
			/* FIXME Need to refragment */
			ret = READ_ONCE(neigh->output)(neigh, skb);
		}
		neigh_release(neigh);
		return ret;
	}
free_skb:
	kfree_skb(skb);
	return 0;
}
324
325 static inline bool
br_nf_ipv4_daddr_was_changed(const struct sk_buff * skb,const struct nf_bridge_info * nf_bridge)326 br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb,
327 const struct nf_bridge_info *nf_bridge)
328 {
329 return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
330 }
331
332 /* This requires some explaining. If DNAT has taken place,
333 * we will need to fix up the destination Ethernet address.
334 * This is also true when SNAT takes place (for the reply direction).
335 *
336 * There are two cases to consider:
337 * 1. The packet was DNAT'ed to a device in the same bridge
338 * port group as it was received on. We can still bridge
339 * the packet.
340 * 2. The packet was DNAT'ed to a different device, either
341 * a non-bridged device or another bridge port group.
342 * The packet will need to be routed.
343 *
344 * The correct way of distinguishing between these two cases is to
345 * call ip_route_input() and to look at skb->dst->dev, which is
346 * changed to the destination device if ip_route_input() succeeds.
347 *
348 * Let's first consider the case that ip_route_input() succeeds:
349 *
350 * If the output device equals the logical bridge device the packet
351 * came in on, we can consider this bridging. The corresponding MAC
352 * address will be obtained in br_nf_pre_routing_finish_bridge.
353 * Otherwise, the packet is considered to be routed and we just
354 * change the destination MAC address so that the packet will
355 * later be passed up to the IP stack to be routed. For a redirected
356 * packet, ip_route_input() will give back the localhost as output device,
357 * which differs from the bridge device.
358 *
359 * Let's now consider the case that ip_route_input() fails:
360 *
361 * This can be because the destination address is martian, in which case
362 * the packet will be dropped.
363 * If IP forwarding is disabled, ip_route_input() will fail, while
364 * ip_route_output_key() can return success. The source
365 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
366 * thinks we're handling a locally generated packet and won't care
367 * if IP forwarding is enabled. If the output device equals the logical bridge
368 * device, we proceed as if ip_route_input() succeeded. If it differs from the
369 * logical bridge port or if ip_route_output_key() fails we drop the packet.
370 */
/* okfn for the fake IPv4 PRE_ROUTING hook run from br_nf_pre_routing().
 * Detects DNAT (see the long comment above) and either re-enters the
 * bridge PRE_ROUTING chain for bridged-DNAT handling, marks the packet
 * for routing, or resumes normal bridging.  Always returns 0; the skb is
 * consumed (re-injected or freed) on every path.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *dev = skb->dev, *br_indev;
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;
	int err;

	br_indev = nf_bridge_get_physindev(skb, net);
	if (!br_indev) {
		kfree_skb(skb);
		return 0;
	}

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	/* Undo the PACKET_HOST masquerade applied in setup_pre_routing() */
	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		err = ip_route_input(skb, iph->daddr, iph->saddr,
				     ip4h_dscp(iph), dev);
		if (err) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     iph->tos & INET_DSCP_MASK, 0,
					     RT_SCOPE_UNIVERSE);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 * require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_drop(skb);
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				/* DNAT target is reachable through the same
				 * bridge: re-run bridge PRE_ROUTING so the
				 * new destination MAC can be resolved in
				 * br_nf_pre_routing_finish_bridge().
				 */
				skb->dev = br_indev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			/* DNAT target lies elsewhere: deliver to the IP stack
			 * for routing.
			 */
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		/* No DNAT: attach the bridge's fake rtable so later hooks
		 * see a dst.
		 */
		rt = bridge_parent_rtable(br_indev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_drop(skb);
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = br_indev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}
456
brnf_get_logical_dev(struct sk_buff * skb,const struct net_device * dev,const struct net * net)457 static struct net_device *brnf_get_logical_dev(struct sk_buff *skb,
458 const struct net_device *dev,
459 const struct net *net)
460 {
461 struct net_device *vlan, *br;
462 struct brnf_net *brnet = net_generic(net, brnf_net_id);
463
464 br = bridge_parent(dev);
465
466 if (brnet->pass_vlan_indev == 0 || !skb_vlan_tag_present(skb))
467 return br;
468
469 vlan = __vlan_find_dev_deep_rcu(br, skb->vlan_proto,
470 skb_vlan_tag_get(skb) & VLAN_VID_MASK);
471
472 return vlan ? vlan : br;
473 }
474
475 /* Some common code for IPv4/IPv6 */
/* Common IPv4/IPv6 PRE_ROUTING setup: record the physical in-port and
 * original pkt_type/encap protocol in the nf_bridge extension, then point
 * skb->dev at the logical (bridge, or VLAN-on-bridge) device.  Returns
 * the new skb->dev.
 */
struct net_device *setup_pre_routing(struct sk_buff *skb, const struct net *net)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	/* Pretend the packet is addressed to us so the IP netfilter hooks
	 * process it; the original pkt_type is restored later via
	 * pkt_otherhost.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge->in_prerouting = 1;
	nf_bridge->physinif = skb->dev->ifindex;
	skb->dev = brnf_get_logical_dev(skb, skb->dev, net);

	/* Remember the encapsulation so it can be re-applied on output */
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->orig_proto = BRNF_PROTO_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->orig_proto = BRNF_PROTO_PPPOE;

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);
	return skb->dev;
}
498
499 /* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
500 * Replicate the checks that IPv4 does on packet reception.
501 * Set skb->dev to the bridge device (i.e. parent of the
502 * receiving device) to make netfilter happy, the REDIRECT
503 * target in particular. Save the original destination IP
504 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge;
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);
	struct brnf_net *brnet;

	/* Make sure any VLAN/PPPoE encap header is in the linear area */
	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL, 0);

	p = br_port_get_rcu(state->in);
	if (p == NULL)
		return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0);
	br = p->br;

	brnet = net_generic(state->net, brnf_net_id);
	if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
	    is_pppoe_ipv6(skb, state->net)) {
		/* IPv6 path enabled either per-netns or per-bridge */
		if (!brnet->call_ip6tables &&
		    !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
			return NF_ACCEPT;
		if (!ipv6_mod_enabled()) {
			pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
			return NF_DROP_REASON(skb, SKB_DROP_REASON_IPV6DISABLED, 0);
		}

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(priv, skb, state);
	}

	if (!brnet->call_iptables && !br_opt_get(br, BROPT_NF_CALL_IPTABLES))
		return NF_ACCEPT;

	if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) &&
	    !is_pppoe_ip(skb, state->net))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_validate_ipv4(state->net, skb))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0);

	if (!nf_bridge_alloc(skb))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0);
	if (!setup_pre_routing(skb, state->net))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0);

	/* Save the original daddr so br_nf_pre_routing_finish() can detect
	 * DNAT after the IPv4 hooks have run.
	 */
	nf_bridge = nf_bridge_info_get(skb);
	nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;

	skb->protocol = htons(ETH_P_IP);
	skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;

	/* Run the IPv4 PRE_ROUTING chain; processing continues in
	 * br_nf_pre_routing_finish() if the chain accepts the packet.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
		skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}
567
568 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
569 /* conntracks' nf_confirm logic cannot handle cloned skbs referencing
570 * the same nf_conn entry, which will happen for multicast (broadcast)
571 * Frames on bridges.
572 *
573 * Example:
574 * macvlan0
575 * br0
576 * ethX ethY
577 *
578 * ethX (or Y) receives multicast or broadcast packet containing
579 * an IP packet, not yet in conntrack table.
580 *
581 * 1. skb passes through bridge and fake-ip (br_netfilter)Prerouting.
582 * -> skb->_nfct now references a unconfirmed entry
583 * 2. skb is broad/mcast packet. bridge now passes clones out on each bridge
584 * interface.
585 * 3. skb gets passed up the stack.
586 * 4. In macvlan case, macvlan driver retains clone(s) of the mcast skb
587 * and schedules a work queue to send them out on the lower devices.
588 *
589 * The clone skb->_nfct is not a copy, it is the same entry as the
590 * original skb. The macvlan rx handler then returns RX_HANDLER_PASS.
591 * 5. Normal conntrack hooks (in NF_INET_LOCAL_IN) confirm the orig skb.
592 *
593 * The Macvlan broadcast worker and normal confirm path will race.
594 *
595 * This race will not happen if step 2 already confirmed a clone. In that
596 * case later steps perform skb_clone() with skb->_nfct already confirmed (in
597 * hash table). This works fine.
598 *
599 * But such confirmation won't happen when eb/ip/nftables rules dropped the
600 * packets before they reached the nf_confirm step in postrouting.
601 *
602 * Work around this problem by explicit confirmation of the entry at
603 * LOCAL_IN time, before upper layer has a chance to clone the unconfirmed
604 * entry.
605 *
606 */
/* Confirm a still-unconfirmed conntrack entry at bridge LOCAL_IN time,
 * before upper layers get a chance to clone the skb (see the race
 * description in the comment above).
 */
static unsigned int br_nf_local_in(void *priv,
				   struct sk_buff *skb,
				   const struct nf_hook_state *state)
{
	bool promisc = BR_INPUT_SKB_CB(skb)->promisc;
	struct nf_conntrack *nfct = skb_nfct(skb);
	const struct nf_ct_hook *ct_hook;
	struct nf_conn *ct;
	int ret;

	/* Frames picked up only due to promiscuous mode: detach conntrack */
	if (promisc) {
		nf_reset_ct(skb);
		return NF_ACCEPT;
	}

	if (!nfct || skb->pkt_type == PACKET_HOST)
		return NF_ACCEPT;

	ct = container_of(nfct, struct nf_conn, ct_general);
	if (likely(nf_ct_is_confirmed(ct)))
		return NF_ACCEPT;

	/* An unconfirmed entry must have exactly one reference here;
	 * anything else indicates a bug, so detach and carry on.
	 */
	if (WARN_ON_ONCE(refcount_read(&nfct->use) != 1)) {
		nf_reset_ct(skb);
		return NF_ACCEPT;
	}

	WARN_ON_ONCE(skb_shared(skb));

	/* We can't call nf_confirm here, it would create a dependency
	 * on nf_conntrack module.
	 */
	ct_hook = rcu_dereference(nf_ct_hook);
	if (!ct_hook) {
		skb->_nfct = 0ul;
		nf_conntrack_put(nfct);
		return NF_ACCEPT;
	}

	/* confirm() expects the network header at the front */
	nf_bridge_pull_encap_header(skb);
	ret = ct_hook->confirm(skb);
	switch (ret & NF_VERDICT_MASK) {
	case NF_STOLEN:
		return NF_STOLEN;
	default:
		nf_bridge_push_encap_header(skb);
		break;
	}

	return ret;
}
658 #endif
659
660 /* PF_BRIDGE/FORWARD *************************************************/
/* okfn for the FORWARD hooks run from br_nf_forward_ip()/_arp():
 * restore pkt_type/protocol/encap header and resume the bridge FORWARD
 * chain at br_forward_finish().  Always returns 0; the skb is consumed.
 */
static int br_nf_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *in;

	if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) {

		/* Record the fragment size learned by the IP hooks */
		if (skb->protocol == htons(ETH_P_IP))
			nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

		if (skb->protocol == htons(ETH_P_IPV6))
			nf_bridge->frag_max_size = IP6CB(skb)->frag_max_size;

		in = nf_bridge_get_physindev(skb, net);
		if (!in) {
			kfree_skb(skb);
			return 0;
		}
		/* Undo the PACKET_HOST masquerade from br_nf_forward_ip() */
		if (nf_bridge->pkt_otherhost) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->pkt_otherhost = false;
		}
		nf_bridge_update_protocol(skb);
	} else {
		/* ARP: br_nf_forward_arp() stashed the in-device in skb->cb */
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	br_nf_hook_thresh(NF_BR_FORWARD, net, sk, skb, in, skb->dev,
			  br_forward_finish);
	return 0;
}
693
694
/* Run bridged IPv4/IPv6 traffic through the NF_INET_FORWARD hooks with
 * the logical (bridge) devices as in/out.  @pf selects NFPROTO_IPV4 or
 * NFPROTO_IPV6.  Returns NF_STOLEN on success (processing continues in
 * br_nf_forward_finish()), NF_ACCEPT when no nf_bridge state exists, or
 * a drop verdict on error.
 */
static unsigned int br_nf_forward_ip(struct sk_buff *skb,
				     const struct nf_hook_state *state,
				     u8 pf)
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;

	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0);

	/* Re-fetch: unsharing may have replaced the extension */
	nf_bridge = nf_bridge_info_get(skb);
	if (!nf_bridge)
		return NF_DROP_REASON(skb, SKB_DROP_REASON_NOMEM, 0);

	parent = bridge_parent(state->out);
	if (!parent)
		return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0);

	nf_bridge_pull_encap_header(skb);

	/* Masquerade as PACKET_HOST for the IP hooks; restored later */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	if (pf == NFPROTO_IPV4) {
		if (br_validate_ipv4(state->net, skb))
			return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0);
		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
		skb->protocol = htons(ETH_P_IP);
	} else if (pf == NFPROTO_IPV6) {
		if (br_validate_ipv6(state->net, skb))
			return NF_DROP_REASON(skb, SKB_DROP_REASON_IP_INHDR, 0);
		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
		skb->protocol = htons(ETH_P_IPV6);
	} else {
		WARN_ON_ONCE(1);
		return NF_DROP;
	}

	nf_bridge->physoutdev = skb->dev;

	NF_HOOK(pf, NF_INET_FORWARD, state->net, NULL, skb,
		brnf_get_logical_dev(skb, state->in, state->net),
		parent,	br_nf_forward_finish);

	return NF_STOLEN;
}
749
/* Run bridged ARP traffic through the NF_ARP_FORWARD hooks, using the
 * physical bridge ports as in/out devices.  Only IPv4 ARP (ar_pln == 4)
 * is filtered; anything else is accepted unchanged.
 */
static unsigned int br_nf_forward_arp(struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);
	struct brnf_net *brnet;

	p = br_port_get_rcu(state->out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	/* arptables hooks apply only if enabled per-netns or per-bridge */
	brnet = net_generic(state->net, brnf_net_id);
	if (!brnet->call_arptables && !br_opt_get(br, BROPT_NF_CALL_ARPTABLES))
		return NF_ACCEPT;

	if (is_vlan_arp(skb, state->net))
		nf_bridge_pull_encap_header(skb);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
		return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL, 0);

	/* Not IPv4 ARP: restore any encap header and pass it through */
	if (arp_hdr(skb)->ar_pln != 4) {
		if (is_vlan_arp(skb, state->net))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	/* Stash the in-device in skb->cb for br_nf_forward_finish() */
	*d = state->in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, state->net, state->sk, skb,
		state->in, state->out, br_nf_forward_finish);

	return NF_STOLEN;
}
784
785 /* This is the 'purely bridged' case. For IP, we pass the packet to
786 * netfilter with indev and outdev set to the bridge device,
787 * but we are still able to filter on the 'real' indev/outdev
788 * because of the physdev module. For ARP, indev and outdev are the
789 * bridge ports.
790 */
br_nf_forward(void * priv,struct sk_buff * skb,const struct nf_hook_state * state)791 static unsigned int br_nf_forward(void *priv,
792 struct sk_buff *skb,
793 const struct nf_hook_state *state)
794 {
795 if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
796 is_pppoe_ip(skb, state->net))
797 return br_nf_forward_ip(skb, state, NFPROTO_IPV4);
798 if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
799 is_pppoe_ipv6(skb, state->net))
800 return br_nf_forward_ip(skb, state, NFPROTO_IPV6);
801 if (IS_ARP(skb) || is_vlan_arp(skb, state->net))
802 return br_nf_forward_arp(skb, state);
803
804 return NF_ACCEPT;
805 }
806
/* Output callback invoked for each IP fragment: restore the L2 header
 * (and any hw-accel VLAN tag) previously saved in the per-CPU scratch
 * area by br_nf_dev_queue_xmit(), then transmit the fragment.
 */
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	/* Ensure enough writable headroom for the saved header */
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	if (data->vlan_proto)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}
829
830 static int
/* Fragment an IPv4 skb through ip_do_fragment(), unless DF forbids it
 * (and ignore_df is not set) or the recorded incoming fragment size
 * exceeds the MTU — in those cases the packet is dropped and -EMSGSIZE
 * returned.
 */
static int
br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		  int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	unsigned int mtu = ip_skb_dst_mtu(sk, skb);
	struct iphdr *iph = ip_hdr(skb);

	if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}
847
nf_bridge_mtu_reduction(const struct sk_buff * skb)848 static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
849 {
850 const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
851
852 if (nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
853 return PPPOE_SES_HLEN;
854 return 0;
855 }
856
/* Final transmit step after POST_ROUTING: restore pkt_type and the encap
 * header, then either transmit directly (GSO or fits the MTU) or
 * re-fragment, saving the L2 header in the per-CPU scratch area so
 * br_nf_push_frag_xmit() can restore it on each fragment.
 */
static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	unsigned int mtu, mtu_reserved;
	int ret;

	mtu_reserved = nf_bridge_mtu_reduction(skb);
	mtu = skb->dev->mtu;

	/* Undo the PACKET_HOST masquerade from earlier hooks */
	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}

	/* Honour the smaller fragment size observed on input */
	if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
		mtu = nf_bridge->frag_max_size;

	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);

	/* Fits (or GSO will segment later): send as-is */
	if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
		nf_bridge_info_free(skb);
		return br_dev_queue_push_xmit(net, sk, skb);
	}

	/* Fragmentation on metadata/template dst is not supported */
	if (unlikely(!skb_valid_dst(skb)))
		goto drop;

	/* This is wrong! We should preserve the original fragment
	 * boundaries by preserving frag_list rather than refragmenting.
	 */
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) &&
	    skb->protocol == htons(ETH_P_IP)) {
		struct brnf_frag_data *data;

		if (br_validate_ipv4(net, skb))
			goto drop;

		IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;

		/* Save the L2 header for br_nf_push_frag_xmit(); the lock
		 * is held across fragmentation so the per-CPU buffer stays
		 * intact until the last fragment was emitted.
		 */
		local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
		data = this_cpu_ptr(&brnf_frag_data_storage);

		if (skb_vlan_tag_present(skb)) {
			data->vlan_tci = skb->vlan_tci;
			data->vlan_proto = skb->vlan_proto;
		} else {
			data->vlan_proto = 0;
		}

		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit);
		local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
		return ret;
	}
	if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) &&
	    skb->protocol == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		struct brnf_frag_data *data;

		if (br_validate_ipv6(net, skb))
			goto drop;

		IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;

		local_lock_nested_bh(&brnf_frag_data_storage.bh_lock);
		data = this_cpu_ptr(&brnf_frag_data_storage);
		data->encap_size = nf_bridge_encap_header_len(skb);
		data->size = ETH_HLEN + data->encap_size;

		skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
						 data->size);

		if (v6ops) {
			ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit);
			local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);
			return ret;
		}
		local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock);

		/* No IPv6 ops available: cannot fragment */
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
 drop:
	kfree_skb(skb);
	return 0;
}
952
953 /* PF_BRIDGE/POST_ROUTING ********************************************/
/* Bridge POST_ROUTING hook: run bridged IPv4/IPv6 traffic through the
 * NF_INET_POST_ROUTING hooks with the real out-device, then transmit via
 * br_nf_dev_queue_xmit().
 */
static unsigned int br_nf_post_routing(void *priv,
				       struct sk_buff *skb,
				       const struct nf_hook_state *state)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	/* if nf_bridge is set, but ->physoutdev is NULL, this packet came in
	 * on a bridge, but was delivered locally and is now being routed:
	 *
	 * POST_ROUTING was already invoked from the ip stack.
	 */
	if (!nf_bridge || !nf_bridge->physoutdev)
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP_REASON(skb, SKB_DROP_REASON_DEV_READY, 0);

	/* Classify the frame; non-IP traffic is not filtered here */
	if (IS_IP(skb) || is_vlan_ip(skb, state->net) ||
	    is_pppoe_ip(skb, state->net))
		pf = NFPROTO_IPV4;
	else if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) ||
		 is_pppoe_ipv6(skb, state->net))
		pf = NFPROTO_IPV6;
	else
		return NF_ACCEPT;

	/* Masquerade as PACKET_HOST for the IP hooks; restored in
	 * br_nf_dev_queue_xmit().
	 */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->pkt_otherhost = true;
	}

	nf_bridge_pull_encap_header(skb);
	if (pf == NFPROTO_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, state->net, state->sk, skb,
		NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}
999
1000 /* IP/SABOTAGE *****************************************************/
1001 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
1002 * for the second time. */
ip_sabotage_in(void * priv,struct sk_buff * skb,const struct nf_hook_state * state)1003 static unsigned int ip_sabotage_in(void *priv,
1004 struct sk_buff *skb,
1005 const struct nf_hook_state *state)
1006 {
1007 struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
1008
1009 if (nf_bridge) {
1010 if (nf_bridge->sabotage_in_done)
1011 return NF_ACCEPT;
1012
1013 if (!nf_bridge->in_prerouting &&
1014 !netif_is_l3_master(skb->dev) &&
1015 !netif_is_l3_slave(skb->dev)) {
1016 nf_bridge->sabotage_in_done = 1;
1017 state->okfn(state->net, state->sk, skb);
1018 return NF_STOLEN;
1019 }
1020 }
1021
1022 return NF_ACCEPT;
1023 }
1024
1025 /* This is called when br_netfilter has called into iptables/netfilter,
1026 * and DNAT has taken place on a bridge-forwarded packet.
1027 *
1028 * neigh->output has created a new MAC header, with local br0 MAC
1029 * as saddr.
1030 *
1031 * This restores the original MAC saddr of the bridged packet
1032 * before invoking bridge forward logic to transmit the packet.
1033 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct net_device *br_indev;

	/* Bridge port the packet originally arrived on; if it disappeared
	 * while the packet was traversing the IP hooks, drop the packet.
	 */
	br_indev = nf_bridge_get_physindev(skb, dev_net(skb->dev));
	if (!br_indev) {
		kfree_skb(skb);
		return;
	}

	/* Pop the MAC header that neigh->output just built. */
	skb_pull(skb, ETH_HLEN);
	nf_bridge->bridged_dnat = 0;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	/* Overwrite the tail ETH_HLEN - ETH_ALEN bytes of the MAC header
	 * (the part after the destination address) with the bytes saved
	 * in neigh_header, restoring the original bridged header.
	 */
	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = br_indev;

	/* Re-enter bridge forwarding as if the frame just arrived on the
	 * original port; clearing physoutdev ends the br_netfilter pass.
	 */
	nf_bridge->physoutdev = NULL;
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}
1058
br_nf_dev_xmit(struct sk_buff * skb)1059 static int br_nf_dev_xmit(struct sk_buff *skb)
1060 {
1061 const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
1062
1063 if (nf_bridge && nf_bridge->bridged_dnat) {
1064 br_nf_pre_routing_finish_bridge_slow(skb);
1065 return 1;
1066 }
1067 return 0;
1068 }
1069
/* Hook set published to the bridge core via the nf_br_ops pointer. */
static const struct nf_br_ops br_ops = {
	.br_dev_xmit_hook =	br_nf_dev_xmit,
};
1073
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static const struct nf_hook_ops br_nf_ops[] = {
	/* Bridge-level hooks (NFPROTO_BRIDGE). */
	{
		.hook = br_nf_pre_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	{
		.hook = br_nf_local_in,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_LAST,
	},
#endif
	{
		.hook = br_nf_forward,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.pf = NFPROTO_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	/* Highest-priority IPv4/IPv6 PRE_ROUTING hooks that skip a second
	 * chain traversal for packets br_netfilter already processed
	 * (see ip_sabotage_in).
	 */
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};
1116
brnf_device_event(struct notifier_block * unused,unsigned long event,void * ptr)1117 static int brnf_device_event(struct notifier_block *unused, unsigned long event,
1118 void *ptr)
1119 {
1120 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1121 struct brnf_net *brnet;
1122 struct net *net;
1123 int ret;
1124
1125 if (event != NETDEV_REGISTER || !netif_is_bridge_master(dev))
1126 return NOTIFY_DONE;
1127
1128 ASSERT_RTNL();
1129
1130 net = dev_net(dev);
1131 brnet = net_generic(net, brnf_net_id);
1132 if (brnet->enabled)
1133 return NOTIFY_OK;
1134
1135 ret = nf_register_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
1136 if (ret)
1137 return NOTIFY_BAD;
1138
1139 brnet->enabled = true;
1140 return NOTIFY_OK;
1141 }
1142
/* Triggers lazy per-netns hook registration (see brnf_device_event). */
static struct notifier_block brnf_notifier __read_mostly = {
	.notifier_call = brnf_device_event,
};
1146
1147 /* recursively invokes nf_hook_slow (again), skipping already-called
1148 * hooks (< NF_BR_PRI_BRNF).
1149 *
1150 * Called with rcu read lock held.
1151 */
int br_nf_hook_thresh(unsigned int hook, struct net *net,
		      struct sock *sk, struct sk_buff *skb,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct net *, struct sock *,
				  struct sk_buff *))
{
	const struct nf_hook_entries *e;
	struct nf_hook_state state;
	struct nf_hook_ops **ops;
	unsigned int i;
	int ret;

	/* No hooks registered on this bridge chain: continue directly. */
	e = rcu_dereference(net->nf.hooks_bridge[hook]);
	if (!e)
		return okfn(net, sk, skb);

	/* Find the index of the first entry that has not run yet; the
	 * entries are ordered by priority.
	 */
	ops = nf_hook_entries_get_hook_ops(e);
	for (i = 0; i < e->num_hook_entries; i++) {
		/* These hooks have already been called */
		if (ops[i]->priority < NF_BR_PRI_BRNF)
			continue;

		/* These hooks have not been called yet, run them. */
		if (ops[i]->priority > NF_BR_PRI_BRNF)
			break;

		/* take a closer look at NF_BR_PRI_BRNF. */
		if (ops[i]->hook == br_nf_pre_routing) {
			/* This hook diverted the skb to this function,
			 * hooks after this have not been run yet.
			 */
			i++;
			break;
		}
	}

	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
			   sk, net, okfn);

	/* Resume the chain at entry i; ret == 1 means all hooks accepted. */
	ret = nf_hook_slow(skb, &state, e, i);
	if (ret == 1)
		ret = okfn(net, sk, skb);

	return ret;
}
1198
1199 #ifdef CONFIG_SYSCTL
1200 static
brnf_sysctl_call_tables(const struct ctl_table * ctl,int write,void * buffer,size_t * lenp,loff_t * ppos)1201 int brnf_sysctl_call_tables(const struct ctl_table *ctl, int write,
1202 void *buffer, size_t *lenp, loff_t *ppos)
1203 {
1204 int ret;
1205
1206 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
1207
1208 if (write && *(int *)(ctl->data))
1209 *(int *)(ctl->data) = 1;
1210 return ret;
1211 }
1212
/* Template for /proc/sys/net/bridge.  The .data pointers are assigned
 * per-netns (entry order: arptables, iptables, ip6tables, vlan-tagged,
 * pppoe-tagged, pass-vlan-input-dev) before registration in
 * br_netfilter_sysctl_init_net().
 */
static struct ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-pass-vlan-input-dev",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
};
1251
br_netfilter_sysctl_default(struct brnf_net * brnf)1252 static inline void br_netfilter_sysctl_default(struct brnf_net *brnf)
1253 {
1254 brnf->call_iptables = 1;
1255 brnf->call_ip6tables = 1;
1256 brnf->call_arptables = 1;
1257 brnf->filter_vlan_tagged = 0;
1258 brnf->filter_pppoe_tagged = 0;
1259 brnf->pass_vlan_indev = 0;
1260 }
1261
br_netfilter_sysctl_init_net(struct net * net)1262 static int br_netfilter_sysctl_init_net(struct net *net)
1263 {
1264 struct ctl_table *table = brnf_table;
1265 struct brnf_net *brnet;
1266
1267 if (!net_eq(net, &init_net)) {
1268 table = kmemdup(table, sizeof(brnf_table), GFP_KERNEL);
1269 if (!table)
1270 return -ENOMEM;
1271 }
1272
1273 brnet = net_generic(net, brnf_net_id);
1274 table[0].data = &brnet->call_arptables;
1275 table[1].data = &brnet->call_iptables;
1276 table[2].data = &brnet->call_ip6tables;
1277 table[3].data = &brnet->filter_vlan_tagged;
1278 table[4].data = &brnet->filter_pppoe_tagged;
1279 table[5].data = &brnet->pass_vlan_indev;
1280
1281 br_netfilter_sysctl_default(brnet);
1282
1283 brnet->ctl_hdr = register_net_sysctl_sz(net, "net/bridge", table,
1284 ARRAY_SIZE(brnf_table));
1285 if (!brnet->ctl_hdr) {
1286 if (!net_eq(net, &init_net))
1287 kfree(table);
1288
1289 return -ENOMEM;
1290 }
1291
1292 return 0;
1293 }
1294
br_netfilter_sysctl_exit_net(struct net * net,struct brnf_net * brnet)1295 static void br_netfilter_sysctl_exit_net(struct net *net,
1296 struct brnf_net *brnet)
1297 {
1298 const struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg;
1299
1300 unregister_net_sysctl_table(brnet->ctl_hdr);
1301 if (!net_eq(net, &init_net))
1302 kfree(table);
1303 }
1304
/* Per-netns init: only sysctl setup happens here; the netfilter hooks
 * are registered lazily from brnf_device_event() when the first bridge
 * appears in the namespace.
 */
static int __net_init brnf_init_net(struct net *net)
{
	return br_netfilter_sysctl_init_net(net);
}
1309 #endif
1310
brnf_exit_net(struct net * net)1311 static void __net_exit brnf_exit_net(struct net *net)
1312 {
1313 struct brnf_net *brnet;
1314
1315 brnet = net_generic(net, brnf_net_id);
1316 if (brnet->enabled) {
1317 nf_unregister_net_hooks(net, br_nf_ops, ARRAY_SIZE(br_nf_ops));
1318 brnet->enabled = false;
1319 }
1320
1321 #ifdef CONFIG_SYSCTL
1322 br_netfilter_sysctl_exit_net(net, brnet);
1323 #endif
1324 }
1325
/* Pernet ops: .id/.size make the core allocate a zeroed struct brnf_net
 * per namespace.  .exit always runs so lazily-registered hooks get
 * removed.
 * NOTE(review): without CONFIG_SYSCTL no .init runs, so the per-netns
 * call_*tables fields stay zero-initialized even though the struct
 * declares their default as 1 — confirm this is intended.
 */
static struct pernet_operations brnf_net_ops __read_mostly = {
#ifdef CONFIG_SYSCTL
	.init = brnf_init_net,
#endif
	.exit = brnf_exit_net,
	.id = &brnf_net_id,
	.size = sizeof(struct brnf_net),
};
1334
br_netfilter_init(void)1335 static int __init br_netfilter_init(void)
1336 {
1337 int ret;
1338
1339 ret = register_pernet_subsys(&brnf_net_ops);
1340 if (ret < 0)
1341 return ret;
1342
1343 ret = register_netdevice_notifier(&brnf_notifier);
1344 if (ret < 0) {
1345 unregister_pernet_subsys(&brnf_net_ops);
1346 return ret;
1347 }
1348
1349 RCU_INIT_POINTER(nf_br_ops, &br_ops);
1350 printk(KERN_NOTICE "Bridge firewalling registered\n");
1351 return 0;
1352 }
1353
/* Module exit: unpublish the xmit hook first, then undo init in reverse
 * order (notifier before pernet subsys, whose ->exit drops the hooks).
 */
static void __exit br_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_br_ops, NULL);
	unregister_netdevice_notifier(&brnf_notifier);
	unregister_pernet_subsys(&brnf_net_ops);
}
1360
/* Module entry/exit points and metadata. */
module_init(br_netfilter_init);
module_exit(br_netfilter_fini);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");
1368