// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Handle incoming frames
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
#include <net/netfilter/nf_queue.h>
#endif
#include <linux/neighbour.h>
#include <net/arp.h>
#include <net/dsa.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"

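/* Final okfn for the NF_BR_LOCAL_IN hook used in br_pass_frame_up():
 * drop any fake rtable attached by br_netfilter and hand the skb to
 * the local network stack.
 */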
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        br_drop_fake_rtable(skb);
        return netif_receive_skb(skb);
}

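/* Deliver a frame to the bridge device itself, i.e. up the local stack:
 * account it in the per-cpu stats, enforce VLAN egress rules and run the
 * NF_BR_LOCAL_IN netfilter hook before netif_receive_skb().
 */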
static int br_pass_frame_up(struct sk_buff *skb)
{
        struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
        struct net_bridge *br = netdev_priv(brdev);
        struct net_bridge_vlan_group *vg;
        struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

        u64_stats_update_begin(&brstats->syncp);
        brstats->rx_packets++;
        brstats->rx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);

        vg = br_vlan_group_rcu(br);

        /* Reset the offload_fwd_mark because there could be a stacked
         * bridge above, and it should not think this bridge is doing
         * that bridge's work forwarding out its ports.
         */
        br_switchdev_frame_unmark(skb);

        /* The bridge is just like any other port.  Make sure the
         * packet is allowed, except in promisc mode when someone
         * may be running packet capture.
         */
        if (!(brdev->flags & IFF_PROMISC) &&
            !br_allowed_egress(vg, skb)) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        indev = skb->dev;
        skb->dev = brdev;
        skb = br_handle_vlan(br, NULL, vg, skb);
        if (!skb)
                return NET_RX_DROP;
        /* update the multicast stats if the packet is IGMP/MLD */
        br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
                           BR_MCAST_DIR_TX);

        return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
                       dev_net(indev), NULL, skb, indev, NULL,
                       br_netif_receive_skb);
}

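/* Main input path: learn the source address, classify the packet as
 * unicast/multicast/broadcast, apply ARP/ND proxy suppression, then
 * resolve the destination in the FDB or MDB and forward, flood or
 * pass the frame up the stack accordingly.
 */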
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        enum br_pkt_type pkt_type = BR_PKT_UNICAST;
        struct net_bridge_fdb_entry *dst = NULL;
        struct net_bridge_mdb_entry *mdst;
        bool local_rcv, mcast_hit = false;
        struct net_bridge *br;
        u16 vid = 0;
        u8 state;

        if (!p || p->state == BR_STATE_DISABLED)
                goto drop;

        state = p->state;
        if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,
                                &state))
                goto out;

        nbp_switchdev_frame_mark(p, skb);

        /* insert into forwarding database after filtering to avoid spoofing */
        br = p->br;
        if (p->flags & BR_LEARNING)
                br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);

        local_rcv = !!(br->dev->flags & IFF_PROMISC);
        if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
                /* by definition the broadcast is also a multicast address */
                if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
                        pkt_type = BR_PKT_BROADCAST;
                        local_rcv = true;
                } else {
                        pkt_type = BR_PKT_MULTICAST;
                        if (br_multicast_rcv(br, p, skb, vid))
                                goto drop;
                }
        }

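        /* The source address was learned above even in LEARNING state,
         * but the frame must not be forwarded or delivered locally until
         * the port reaches FORWARDING.
         */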
        if (state == BR_STATE_LEARNING)
                goto drop;

        BR_INPUT_SKB_CB(skb)->brdev = br->dev;
        BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);

        if (IS_ENABLED(CONFIG_INET) &&
            (skb->protocol == htons(ETH_P_ARP) ||
             skb->protocol == htons(ETH_P_RARP))) {
                br_do_proxy_suppress_arp(skb, br, vid, p);
        } else if (IS_ENABLED(CONFIG_IPV6) &&
                   skb->protocol == htons(ETH_P_IPV6) &&
                   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
                   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
                                 sizeof(struct nd_msg)) &&
                   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
                struct nd_msg *msg, _msg;

                msg = br_is_nd_neigh_msg(skb, &_msg);
                if (msg)
                        br_do_suppress_nd(skb, br, vid, p, msg);
        }

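        /* Resolve the destination: multicast via the MDB (falling back to
         * flooding when no querier exists), unicast via the FDB; broadcast
         * needs no lookup and is always flooded.
         */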
        switch (pkt_type) {
        case BR_PKT_MULTICAST:
                mdst = br_mdb_get(br, skb, vid);
                if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
                    br_multicast_querier_exists(br, eth_hdr(skb))) {
                        if ((mdst && mdst->host_joined) ||
                            br_multicast_is_router(br)) {
                                local_rcv = true;
                                br->dev->stats.multicast++;
                        }
                        mcast_hit = true;
                } else {
                        local_rcv = true;
                        br->dev->stats.multicast++;
                }
                break;
        case BR_PKT_UNICAST:
                dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
                break;
        default:
                break;
        }

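        /* A hit on a local FDB entry means the frame is for the bridge
         * device itself; a remote entry gives the egress port. With no
         * entry (and no multicast hit) the frame is flooded.
         */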
        if (dst) {
                unsigned long now = jiffies;

                if (test_bit(BR_FDB_LOCAL, &dst->flags))
                        return br_pass_frame_up(skb);

                if (now != dst->used)
                        dst->used = now;
                br_forward(dst->dst, skb, local_rcv, false);
        } else {
                if (!mcast_hit)
                        br_flood(br, skb, pkt_type, local_rcv, false);
                else
                        br_multicast_flood(mdst, skb, local_rcv, false);
        }

        if (local_rcv)
                return br_pass_frame_up(skb);

out:
        return 0;
drop:
        kfree_skb(skb);
        goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);

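/* Learn the source address of a link-local frame when the port is
 * learning and link-local learning has not been disabled.
 */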
static void __br_handle_local_finish(struct sk_buff *skb)
{
        struct net_bridge_port *p = br_port_get_rcu(skb->dev);
        u16 vid = 0;

        /* check if vlan is allowed, to avoid spoofing */
        if ((p->flags & BR_LEARNING) &&
            nbp_state_should_learn(p) &&
            !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
            br_should_learn(p, skb, &vid))
                br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}

/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        __br_handle_local_finish(skb);

        /* return 1 to signal the okfn() was called so it's ok to use the skb */
        return 1;
}

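/* Run the NF_BR_PRE_ROUTING hook entries by hand instead of via NF_HOOK()
 * so that an NF_ACCEPT verdict with the br_netfilter_broute flag set can
 * return RX_HANDLER_PASS to the caller. Falls through to
 * br_handle_frame_finish() when no hooks are registered or all of them
 * accept the frame.
 */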
static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
{
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
        struct nf_hook_entries *e = NULL;
        struct nf_hook_state state;
        unsigned int verdict, i;
        struct net *net;
        int ret;

        net = dev_net(skb->dev);
#ifdef HAVE_JUMP_LABEL
        if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
                goto frame_finish;
#endif

        e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
        if (!e)
                goto frame_finish;

        nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
                           NFPROTO_BRIDGE, skb->dev, NULL, NULL,
                           net, br_handle_frame_finish);

        for (i = 0; i < e->num_hook_entries; i++) {
                verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
                switch (verdict & NF_VERDICT_MASK) {
                case NF_ACCEPT:
                        if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
                                *pskb = skb;
                                return RX_HANDLER_PASS;
                        }
                        break;
                case NF_DROP:
                        kfree_skb(skb);
                        return RX_HANDLER_CONSUMED;
                case NF_QUEUE:
                        ret = nf_queue(skb, &state, i, verdict);
                        if (ret == 1)
                                continue;
                        return RX_HANDLER_CONSUMED;
                default: /* STOLEN */
                        return RX_HANDLER_CONSUMED;
                }
        }
frame_finish:
        net = dev_net(skb->dev);
        br_handle_frame_finish(net, NULL, skb);
#else
        br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
#endif
        return RX_HANDLER_CONSUMED;
}

/*
 * Return RX_HANDLER_CONSUMED if the skb was handled, RX_HANDLER_PASS otherwise.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
        struct net_bridge_port *p;
        struct sk_buff *skb = *pskb;
        const unsigned char *dest = eth_hdr(skb)->h_dest;

        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return RX_HANDLER_PASS;

        if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
                goto drop;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return RX_HANDLER_CONSUMED;

        memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

        p = br_port_get_rcu(skb->dev);
        if (p->flags & BR_VLAN_TUNNEL) {
                if (br_handle_ingress_vlan_tunnel(skb, p,
                                                  nbp_vlan_group_rcu(p)))
                        goto drop;
        }

        if (unlikely(is_link_local_ether_addr(dest))) {
                u16 fwd_mask = p->br->group_fwd_mask_required;

                /*
                 * See IEEE 802.1D Table 7-10 Reserved addresses
                 *
                 * Assignment                   Value
                 * Bridge Group Address         01-80-C2-00-00-00
                 * (MAC Control) 802.3          01-80-C2-00-00-01
                 * (Link Aggregation) 802.3     01-80-C2-00-00-02
                 * 802.1X PAE address           01-80-C2-00-00-03
                 *
                 * 802.1AB LLDP                 01-80-C2-00-00-0E
                 *
                 * Others reserved for future standardization
                 */
                fwd_mask |= p->group_fwd_mask;
                switch (dest[5]) {
                case 0x00:      /* Bridge Group Address */
                        /* If STP is turned off,
                         * then must forward to keep loop detection.
                         */
                        if (p->br->stp_enabled == BR_NO_STP ||
                            fwd_mask & (1u << dest[5]))
                                goto forward;
                        *pskb = skb;
                        __br_handle_local_finish(skb);
                        return RX_HANDLER_PASS;

                case 0x01:      /* IEEE MAC (Pause) */
                        goto drop;

                case 0x0E:      /* 802.1AB LLDP */
                        fwd_mask |= p->br->group_fwd_mask;
                        if (fwd_mask & (1u << dest[5]))
                                goto forward;
                        *pskb = skb;
                        __br_handle_local_finish(skb);
                        return RX_HANDLER_PASS;

                default:
                        /* Allow selective forwarding for most other protocols */
                        fwd_mask |= p->br->group_fwd_mask;
                        if (fwd_mask & (1u << dest[5]))
                                goto forward;
                }

                /* The else clause should be hit when nf_hook():
                 *   - returns < 0 (drop/error)
                 *   - returns = 0 (stolen/nf_queue)
                 * Thus return 1 from the okfn() to signal the skb is ok to pass
                 */
                if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
                            dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                            br_handle_local_finish) == 1) {
                        return RX_HANDLER_PASS;
                } else {
                        return RX_HANDLER_CONSUMED;
                }
        }

        if (unlikely(br_mrp_process(p, skb)))
                return RX_HANDLER_PASS;

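        /* Only ports in FORWARDING or LEARNING state reach the netfilter
         * PRE_ROUTING hook and br_handle_frame_finish(); LEARNING-state
         * frames are dropped there after the source address is learned.
         */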
forward:
        switch (p->state) {
        case BR_STATE_FORWARDING:
        case BR_STATE_LEARNING:
                if (ether_addr_equal(p->br->dev->dev_addr, dest))
                        skb->pkt_type = PACKET_HOST;

                return nf_hook_bridge_pre(skb, pskb);
        default:
drop:
                kfree_skb(skb);
        }
        return RX_HANDLER_CONSUMED;
}

/* This function has no purpose other than to appease the br_port_get_rcu/rtnl
 * helpers which identify bridged ports according to the rx_handler installed
 * on them (so there _needs_ to be a bridge rx_handler even if we don't need it
 * to do anything useful). This bridge won't support traffic to/from the stack,
 * but only hardware bridging. So return RX_HANDLER_PASS so we don't steal
 * frames from the ETH_P_XDSA packet_type handler.
 */
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
        return RX_HANDLER_PASS;
}

rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
{
        if (netdev_uses_dsa(dev))
                return br_handle_frame_dummy;

        return br_handle_frame;
}