// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/switchdev.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);

static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
			      const void *ptr)
{
	const struct net_bridge_vlan *vle = ptr;
	u16 vid = *(u16 *)arg->key;

	return vle->vid != vid;
}

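/* All vlans of a group live in vg->vlan_hash, keyed purely by the 16-bit
 * vid: br_vlan_cmp() above follows the rhashtable convention of returning
 * zero on a match and nonzero otherwise, so lookups compare nothing else.
 */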
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,
	.max_size = VLAN_N_VID,
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};

static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
{
	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}

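/* Note: the smp_wmb() in the two pvid helpers below orders the state/pvid
 * stores against lockless readers (e.g. br_get_pvid()), which pair with a
 * read barrier on their side.
 */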
static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
			    const struct net_bridge_vlan *v)
{
	if (vg->pvid == v->vid)
		return false;

	smp_wmb();
	br_vlan_set_pvid_state(vg, v->state);
	vg->pvid = v->vid;

	return true;
}

static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	smp_wmb();
	vg->pvid = 0;

	return true;
}

/* return true if anything changed, false otherwise */
static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan_group *vg;
	u16 old_flags = v->flags;
	bool ret;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	if (flags & BRIDGE_VLAN_INFO_PVID)
		ret = __vlan_add_pvid(vg, v);
	else
		ret = __vlan_delete_pvid(vg, v->vid);

	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	else
		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;

	return ret || !!(old_flags ^ v->flags);
}

static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
			  struct net_bridge_vlan *v, u16 flags,
			  struct netlink_ext_ack *extack)
{
	int err;

	/* Try the switchdev op first. If it is not supported, fall back to
	 * the 8021q add.
	 */
	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
	if (err == -EOPNOTSUPP)
		return vlan_vid_add(dev, br->vlan_proto, v->vid);
	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
	return err;
}

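/* The per-group vlan_list is kept sorted by ascending vid; walking the list
 * backwards finds the insertion point quickly for the common case of vlans
 * being added in increasing order.
 */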
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);

	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;
	}
	list_add_rcu(&v->vlist, hpos);
}

static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}

static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
			  const struct net_bridge_vlan *v)
{
	int err;

	/* Try the switchdev op first. If it is not supported, fall back to
	 * the 8021q del.
	 */
	err = br_switchdev_port_vlan_del(dev, v->vid);
	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
		vlan_vid_del(dev, br->vlan_proto, v->vid);
	return err == -EOPNOTSUPP ? 0 : err;
}

/* Returns the master vlan; if it doesn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *
br_vlan_get_master(struct net_bridge *br, u16 vid,
		   struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed, extack))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);

	return masterv;
}

static void br_master_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(!br_vlan_is_master(v));
	free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}

static void nbp_vlan_rcu_free(struct rcu_head *rcu)
{
	struct net_bridge_vlan *v;

	v = container_of(rcu, struct net_bridge_vlan, rcu);
	WARN_ON(br_vlan_is_master(v));
	/* if we had per-port stats configured then free them here */
	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
		free_percpu(v->stats);
	v->stats = NULL;
	kfree(v);
}

/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 */
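/* For illustration (hypothetical device names), case 1 corresponds to e.g.
 *   bridge vlan add dev eth0 vid 100
 * when vid 100 already exists on br0, while case 2 corresponds to
 *   bridge vlan add dev br0 vid 100 self
 */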
static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
		      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v, flags, extack);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed, extack);
			if (err)
				goto out_filt;

			if (changed)
				br_vlan_notify(br, NULL, v->vid, 0,
					       RTM_NEWVLAN);
		}

		masterv = br_vlan_get_master(br, v->vid, extack);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
			if (!v->stats) {
				err = -ENOMEM;
				goto out_filt;
			}
			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
		} else {
			v->stats = masterv->stats;
		}
	} else {
		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
		if (err && err != -EOPNOTSUPP)
			goto out;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* set the state before publishing */
	v->state = BR_STATE_FORWARDING;

	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);

	if (p)
		nbp_vlan_set_vlan_dev_state(p, v->vid);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v);
		if (masterv) {
			if (v->stats && masterv->stats != v->stats)
				free_percpu(v->stats);
			v->stats = NULL;

			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}

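/* __vlan_del() undoes __vlan_add() in reverse: the pvid is cleared first,
 * then the hardware/8021q filter entry; the software entry is unlinked and
 * freed after an RCU grace period, and port vlans drop their reference on
 * the master (bridge) vlan.
 */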
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		nbp_vlan_set_vlan_dev_state(p, v->vid);
		call_rcu(&v->rcu, nbp_vlan_rcu_free);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}

static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}

static void __vlan_flush(const struct net_bridge *br,
			 const struct net_bridge_port *p,
			 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;
	u16 v_start = 0, v_end = 0;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
		/* take care of disjoint ranges */
		if (!v_start) {
			v_start = vlan->vid;
		} else if (vlan->vid - v_end != 1) {
			/* found range end, notify and start next one */
			br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
			v_start = vlan->vid;
		}
		v_end = vlan->vid;

		__vlan_del(vlan);
	}

	/* notify about the last/whole vlan range */
	if (v_start)
		br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
}

struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has the untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* A vlan entry must be configured at this point. The only exception
	 * is when the bridge is set in promiscuous mode and the packet is
	 * destined for the bridge device. In this case pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		__vlan_hwaccel_clear_tag(skb);

	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}

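/* Ingress policy in a nutshell: determine which vlan the frame belongs to
 * (its tag, or the port's pvid for untagged/priority-tagged frames), make
 * sure that vlan exists and its per-vlan state allows ingress, and account
 * rx stats if enabled.
 */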
/* Called under RCU */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid,
			      u8 *state)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, we know that skb->vlan_tci VID
			 * field was 0.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
			if (*state == BR_STATE_FORWARDING) {
				*state = br_vlan_get_pvid_state(vg);
				if (!br_vlan_state_allowed(*state, true))
					goto drop;
			}
			return true;
		}
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (*state == BR_STATE_FORWARDING) {
		*state = br_vlan_get_state(v);
		if (!br_vlan_state_allowed(*state, true))
			goto drop;
	}

	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}

bool br_allowed_ingress(const struct net_bridge *br,
			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
			u16 *vid, u8 *state)
{
	/* If VLAN filtering is disabled on the bridge, all packets are
	 * permitted.
	 */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
		return true;
	}

	return __allowed_ingress(br, vg, skb, vid, state);
}

/* Called under RCU. */
bool br_allowed_egress(struct net_bridge_vlan_group *vg,
		       const struct sk_buff *skb)
{
	const struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		return true;

	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	if (v && br_vlan_should_use(v) &&
	    br_vlan_state_allowed(br_vlan_get_state(v), false))
		return true;

	return false;
}

/* Called under RCU */
bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	/* If filtering was disabled at input, let it pass. */
	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
		return true;

	vg = nbp_vlan_group_rcu(p);
	if (!vg || !vg->num_vlans)
		return false;

	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
		*vid = 0;

	if (!*vid) {
		*vid = br_get_pvid(vg);
		if (!*vid ||
		    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
			return false;

		return true;
	}

	v = br_vlan_find(vg, *vid);
	if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
		return true;

	return false;
}

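/* br_vlan_add_existing() handles re-adding a vid that is already present on
 * the bridge: it may upgrade a master entry that existed only on behalf of
 * port vlans (case 3 in the __vlan_add() comment) into a real bridge entry,
 * and applies any flag changes.
 */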
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed,
				struct netlink_ext_ack *extack)
{
	int err;

	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
		struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed,
					    extack);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags, extack);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}

void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	__vlan_flush(br, NULL, vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (!vg)
		return NULL;

	return br_vlan_lookup(&vg->vlan_hash, vid);
}

/* Must be protected by RTNL. */
static void recalculate_group_addr(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
		return;

	spin_lock_bh(&br->lock);
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q)) {
		/* Bridge Group Address */
		br->group_addr[5] = 0x00;
	} else { /* vlan_enabled && ETH_P_8021AD */
		/* Provider Bridge Group Address */
		br->group_addr[5] = 0x08;
	}
	spin_unlock_bh(&br->lock);
}

/* Must be protected by RTNL. */
void br_recalculate_fwd_mask(struct net_bridge *br)
{
	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
	    br->vlan_proto == htons(ETH_P_8021Q))
		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
	else /* vlan_enabled && ETH_P_8021AD */
		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
					      ~(1u << br->group_addr[5]);
}

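/* Toggling vlan filtering is offered to switchdev first; since the attr is
 * marked SWITCHDEV_F_SKIP_EOPNOTSUPP, software-only bridges simply proceed.
 * From user space this corresponds to e.g. (hypothetical device name):
 *   ip link set br0 type bridge vlan_filtering 1
 */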
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}

int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}

bool br_vlan_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_opt_get(br, BROPT_VLAN_ENABLED);
}
EXPORT_SYMBOL_GPL(br_vlan_enabled);

int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
{
	struct net_bridge *br = netdev_priv(dev);

	*p_proto = ntohs(br->vlan_proto);

	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_proto);

int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_protocol = ntohs(proto),
	};
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto = br->vlan_proto;

	if (br->vlan_proto == proto)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, oldproto, vlan->vid);
		}
	}

	return 0;

err_filt:
	attr.u.vlan_protocol = ntohs(oldproto);
	switchdev_port_attr_set(br->dev, &attr);

	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist) {
		if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
			continue;
		vlan_vid_del(p->dev, proto, vlan->vid);
	}

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			if (vlan->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
				continue;
			vlan_vid_del(p->dev, proto, vlan->vid);
		}
	}

	return err;
}

int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
{
	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
		return -EPROTONOSUPPORT;

	return __br_vlan_set_proto(br, htons(val));
}

int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
{
	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *p;

	/* only allow changing the option when no port vlans are configured */
	list_for_each_entry(p, &br->port_list, list) {
		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);

		if (vg->num_vlans)
			return -EBUSY;
	}

	switch (val) {
	case 0:
	case 1:
		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	struct net_bridge_vlan *v;

	if (vid != vg->pvid)
		return false;

	v = br_vlan_lookup(&vg->vlan_hash, vid);
	if (v && br_vlan_should_use(v) &&
	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
		return true;

	return false;
}

static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid)) {
		if (!br_vlan_delete(br, pvid))
			br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
		    !nbp_vlan_delete(p, pvid))
			br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	br->default_pvid = 0;
}

int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
			       struct netlink_ext_ack *extack)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange, extack);
		if (err)
			goto out;

		if (br_vlan_delete(br, old_pvid))
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange, extack);
		if (err)
			goto err_port;
		if (nbp_vlan_delete(p, old_pvid))
			br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
		br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	bitmap_free(changed);
	return err;

err_port:
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid) {
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange, NULL);
			br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
		}
		nbp_vlan_delete(p, pvid);
		br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
	}

	if (test_bit(0, changed)) {
		if (old_pvid) {
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange, NULL);
			br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
		}
		br_vlan_delete(br, pvid);
		br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
	}
	goto out;
}

int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
{
	u16 pvid = val;
	int err = 0;

	if (val >= VLAN_VID_MASK)
		return -EINVAL;

	if (pvid == br->default_pvid)
		goto out;

	/* Only allow default pvid change when filtering is disabled */
	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
		pr_info_once("Please disable vlan filtering to change default_pvid\n");
		err = -EPERM;
		goto out;
	}
	err = __br_vlan_set_default_pvid(br, pvid, NULL);
out:
	return err;
}

int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	rcu_assign_pointer(br->vlgrp, vg);

out:
	return ret;

err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}

int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed, extack);
		if (ret)
			goto err_vlan_add;
		br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
		 bool *changed, struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vlan = br_vlan_find(nbp_vlan_group(port), vid);
	if (vlan) {
		/* Pass the flags to the hardware bridge */
		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
		if (ret && ret != -EOPNOTSUPP)
			return ret;
		*changed = __vlan_add_flags(vlan, flags);

		return 0;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->vid = vid;
	vlan->port = port;
	ret = __vlan_add(vlan, flags, extack);
	if (ret)
		kfree(vlan);
	else
		*changed = true;

	return ret;
}

/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	v = br_vlan_find(nbp_vlan_group(port), vid);
	if (!v)
		return -ENOENT;
	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
	br_fdb_delete_by_port(port->br, port, vid, 0);

	return __vlan_del(v);
}

void nbp_vlan_flush(struct net_bridge_port *port)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	vg = nbp_vlan_group(port);
	__vlan_flush(port->br, port, vg);
	RCU_INIT_POINTER(port->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}

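/* Per-cpu counters are folded into one sum below; the u64_stats seqcount
 * retry loop makes the 64-bit reads consistent even on 32-bit hosts where
 * such reads are not atomic.
 */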
void br_vlan_get_stats(const struct net_bridge_vlan *v,
		       struct br_vlan_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		struct br_vlan_stats *cpu_stats;
		unsigned int start;

		cpu_stats = per_cpu_ptr(v->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rxpackets = cpu_stats->rx_packets;
			rxbytes = cpu_stats->rx_bytes;
			txbytes = cpu_stats->tx_bytes;
			txpackets = cpu_stats->tx_packets;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_bytes += txbytes;
		stats->tx_packets += txpackets;
	}
}

int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid);

int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;

	p = br_port_get_check_rcu(dev);
	if (p)
		vg = nbp_vlan_group_rcu(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group_rcu(netdev_priv(dev));
	else
		return -EINVAL;

	*p_pvid = br_get_pvid(vg);
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);

int br_vlan_get_info(const struct net_device *dev, u16 vid,
		     struct bridge_vlan_info *p_vinfo)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge_port *p;

	ASSERT_RTNL();
	p = br_port_get_check_rtnl(dev);
	if (p)
		vg = nbp_vlan_group(p);
	else if (netif_is_bridge_master(dev))
		vg = br_vlan_group(netdev_priv(dev));
	else
		return -EINVAL;

	v = br_vlan_find(vg, vid);
	if (!v)
		return -ENOENT;

	p_vinfo->vid = vid;
	p_vinfo->flags = v->flags;
	if (vid == br_get_pvid(vg))
		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
	return 0;
}
EXPORT_SYMBOL_GPL(br_vlan_get_info);

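/* The helpers below implement vlan "bridge binding": a vlan device created
 * on top of the bridge with VLAN_FLAG_BRIDGE_BINDING tracks the carrier
 * state of its vlan on the bridge ports instead of the bridge's own carrier.
 * With a recent iproute2 this is enabled via e.g. (hypothetical names):
 *   ip link add link br0 name br0.100 type vlan id 100 bridge_binding on
 */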
static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
{
	return is_vlan_dev(dev) &&
		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
}

static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
			__always_unused struct netdev_nested_priv *priv)
{
	return br_vlan_is_bind_vlan_dev(dev);
}

static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
{
	int found;

	rcu_read_lock();
	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
					      NULL);
	rcu_read_unlock();

	return !!found;
}

struct br_vlan_bind_walk_data {
	u16 vid;
	struct net_device *result;
};

static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
					  struct netdev_nested_priv *priv)
{
	struct br_vlan_bind_walk_data *data = priv->data;
	int found = 0;

	if (br_vlan_is_bind_vlan_dev(dev) &&
	    vlan_dev_priv(dev)->vlan_id == data->vid) {
		data->result = dev;
		found = 1;
	}

	return found;
}

static struct net_device *
br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
{
	struct br_vlan_bind_walk_data data = {
		.vid = vid,
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
				      &priv);
	rcu_read_unlock();

	return data.result;
}

static bool br_vlan_is_dev_up(const struct net_device *dev)
{
	return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
}

static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
				       struct net_device *vlan_dev)
{
	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	bool has_carrier = false;

	if (!netif_carrier_ok(br->dev)) {
		netif_carrier_off(vlan_dev);
		return;
	}

	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
			has_carrier = true;
			break;
		}
	}

	if (has_carrier)
		netif_carrier_on(vlan_dev);
	else
		netif_carrier_off(vlan_dev);
}

static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
	struct net_bridge_vlan *vlan;
	struct net_device *vlan_dev;

	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
							   vlan->vid);
		if (vlan_dev) {
			if (br_vlan_is_dev_up(p->dev)) {
				if (netif_carrier_ok(p->br->dev))
					netif_carrier_on(vlan_dev);
			} else {
				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
			}
		}
	}
}

static void br_vlan_upper_change(struct net_device *dev,
				 struct net_device *upper_dev,
				 bool linking)
{
	struct net_bridge *br = netdev_priv(dev);

	if (!br_vlan_is_bind_vlan_dev(upper_dev))
		return;

	if (linking) {
		br_vlan_set_vlan_dev_state(br, upper_dev);
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
	} else {
		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
			      br_vlan_has_upper_bind_vlan_dev(dev));
	}
}

struct br_vlan_link_state_walk_data {
	struct net_bridge *br;
};

static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
					struct netdev_nested_priv *priv)
{
	struct br_vlan_link_state_walk_data *data = priv->data;

	if (br_vlan_is_bind_vlan_dev(vlan_dev))
		br_vlan_set_vlan_dev_state(data->br, vlan_dev);

	return 0;
}

static void br_vlan_link_state_change(struct net_device *dev,
				      struct net_bridge *br)
{
	struct br_vlan_link_state_walk_data data = {
		.br = br
	};
	struct netdev_nested_priv priv = {
		.data = (void *)&data,
	};

	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
				      &priv);
	rcu_read_unlock();
}

/* Must be protected by RTNL. */
static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
{
	struct net_device *vlan_dev;

	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
	if (vlan_dev)
		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
}

/* Must be protected by RTNL. */
int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct net_bridge *br = netdev_priv(dev);
	int vlcmd = 0, ret = 0;
	bool changed = false;

	switch (event) {
	case NETDEV_REGISTER:
		ret = br_vlan_add(br, br->default_pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
		vlcmd = RTM_NEWVLAN;
		break;
	case NETDEV_UNREGISTER:
		changed = !br_vlan_delete(br, br->default_pvid);
		vlcmd = RTM_DELVLAN;
		break;
	case NETDEV_CHANGEUPPER:
		info = ptr;
		br_vlan_upper_change(dev, info->upper_dev, info->linking);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
			break;
		br_vlan_link_state_change(dev, br);
		break;
	}
	if (changed)
		br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);

	return ret;
}

/* Must be protected by RTNL. */
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
{
	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
		return;

	switch (event) {
	case NETDEV_CHANGE:
	case NETDEV_DOWN:
	case NETDEV_UP:
		br_vlan_set_all_vlan_dev_state(p);
		break;
	}
}

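/* Everything below implements the RTM_NEWVLAN/RTM_DELVLAN/RTM_GETVLAN
 * netlink family: filling vlan entries (with optional per-vlan stats),
 * compressing consecutive vids with equal flags/options into ranges, and
 * processing user requests.
 */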
static bool br_vlan_stats_fill(struct sk_buff *skb,
			       const struct net_bridge_vlan *v)
{
	struct br_vlan_stats stats;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
	if (!nest)
		return false;

	br_vlan_get_stats(v, &stats);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
			      stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
			      BRIDGE_VLANDB_STATS_PAD) ||
	    nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
			      stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
		goto out_err;

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

/* v_opts is used to dump the options which must be equal in the whole range */
static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts,
			      u16 flags,
			      bool dump_stats)
{
	struct bridge_vlan_info info;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
	if (!nest)
		return false;

	memset(&info, 0, sizeof(info));
	info.vid = vid;
	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
		info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
	if (flags & BRIDGE_VLAN_INFO_PVID)
		info.flags |= BRIDGE_VLAN_INFO_PVID;

	if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    !(flags & BRIDGE_VLAN_INFO_PVID) &&
	    nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
		goto out_err;

	if (v_opts) {
		if (!br_vlan_opts_fill(skb, v_opts))
			goto out_err;

		if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
			goto out_err;
	}

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
		+ nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
		+ br_vlan_opts_nl_size(); /* bridge vlan options */
}

void br_vlan_notify(const struct net_bridge *br,
		    const struct net_bridge_port *p,
		    u16 vid, u16 vid_range,
		    int cmd)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v = NULL;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	struct net *net;
	u16 flags = 0;
	int ifindex;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	if (p) {
		ifindex = p->dev->ifindex;
		vg = nbp_vlan_group(p);
		net = dev_net(p->dev);
	} else {
		ifindex = br->dev->ifindex;
		vg = br_vlan_group(br);
		net = dev_net(br->dev);
	}

	skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = ifindex;

	switch (cmd) {
	case RTM_NEWVLAN:
		/* need to find the vlan due to flags/options */
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v))
			goto out_kfree;

		flags = v->flags;
		if (br_get_pvid(vg) == v->vid)
			flags |= BRIDGE_VLAN_INFO_PVID;
		break;
	case RTM_DELVLAN:
		break;
	default:
		goto out_kfree;
	}

	if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}

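/* For example, vids 10-20 that share flags and options are dumped as a
 * single BRIDGE_VLANDB_ENTRY with BRIDGE_VLANDB_ENTRY_RANGE set to 20;
 * the pvid always terminates a range since its flags differ.
 */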
/* check if v_curr can enter a range ending in range_end */
bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
			     const struct net_bridge_vlan *range_end)
{
	return v_curr->vid - range_end->vid == 1 &&
	       range_end->flags == v_curr->flags &&
	       br_vlan_opts_eq_range(v_curr, range_end);
}

static int br_vlan_dump_dev(const struct net_device *dev,
			    struct sk_buff *skb,
			    struct netlink_callback *cb,
			    u32 dump_flags)
{
	struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
	bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
	struct net_bridge_vlan_group *vg;
	int idx = 0, s_idx = cb->args[1];
	struct nlmsghdr *nlh = NULL;
	struct net_bridge_port *p;
	struct br_vlan_msg *bvm;
	struct net_bridge *br;
	int err = 0;
	u16 pvid;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
		return -EINVAL;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
		p = NULL;
	} else {
		p = br_port_get_rcu(dev);
		if (WARN_ON(!p))
			return -EINVAL;
		vg = nbp_vlan_group_rcu(p);
		br = p->br;
	}

	if (!vg)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = PF_BRIDGE;
	bvm->ifindex = dev->ifindex;
	pvid = br_get_pvid(vg);

	/* idx must stay at range's beginning until it is filled in */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		if (idx < s_idx) {
			idx++;
			continue;
		}

		if (!range_start) {
			range_start = v;
			range_end = v;
			continue;
		}

		if (dump_stats || v->vid == pvid ||
		    !br_vlan_can_enter_range(v, range_end)) {
			u16 vlan_flags = br_vlan_flags(range_start, pvid);

			if (!br_vlan_fill_vids(skb, range_start->vid,
					       range_end->vid, range_start,
					       vlan_flags, dump_stats)) {
				err = -EMSGSIZE;
				break;
			}
			/* advance number of filled vlans */
			idx += range_end->vid - range_start->vid + 1;

			range_start = v;
		}
		range_end = v;
	}

	/* err will be 0 and range_start will be set in 3 cases here:
	 * - first vlan (range_start == range_end)
	 * - last vlan (range_start == range_end, not in range)
	 * - last vlan range (range_start != range_end, in range)
	 */
	if (!err && range_start &&
	    !br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
			       range_start, br_vlan_flags(range_start, pvid),
			       dump_stats))
		err = -EMSGSIZE;

	cb->args[1] = err ? idx : 0;

	nlmsg_end(skb, nlh);

	return err;
}

static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
	[BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
};

static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
	int idx = 0, err = 0, s_idx = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	u32 dump_flags = 0;

	err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
			  br_vlan_db_dump_pol, cb->extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(cb->nlh);
	if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
		dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);

	rcu_read_lock();
	if (bvm->ifindex) {
		dev = dev_get_by_index_rcu(net, bvm->ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out_err;
		}
		err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
		/* if the dump completed without an error we return 0 here */
		if (err != -EMSGSIZE)
			goto out_err;
	} else {
		for_each_netdev_rcu(net, dev) {
			if (idx < s_idx)
				goto skip;

			err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
			if (err == -EMSGSIZE)
				break;
skip:
			idx++;
		}
	}
	cb->args[0] = idx;
	rcu_read_unlock();

	return skb->len;

out_err:
	rcu_read_unlock();

	return err;
}

static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
	[BRIDGE_VLANDB_ENTRY_INFO] =
		NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
	[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
	[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
	[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
};

static int br_vlan_rtm_process_one(struct net_device *dev,
				   const struct nlattr *attr,
				   int cmd, struct netlink_ext_ack *extack)
{
	struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
	bool changed = false, skip_processing = false;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0, cmdmap = 0;
	struct net_bridge *br;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (WARN_ON(!p))
			return -ENODEV;
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
			       br_vlan_db_policy, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
		return -EINVAL;
	}
	memset(&vrange_end, 0, sizeof(vrange_end));

	vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
	if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
			    BRIDGE_VLAN_INFO_RANGE_END)) {
		NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
		return -EINVAL;
	}
	if (!br_vlan_valid_id(vinfo->vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
		vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
		/* validate user-provided flags without RANGE_BEGIN */
		vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
		vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;

		/* vinfo_last is the range start, vinfo the range end */
		vinfo_last = vinfo;
		vinfo = &vrange_end;

		if (!br_vlan_valid_id(vinfo->vid, extack) ||
		    !br_vlan_valid_range(vinfo, vinfo_last, extack))
			return -EINVAL;
	}

	switch (cmd) {
	case RTM_NEWVLAN:
		cmdmap = RTM_SETLINK;
		skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
		break;
	case RTM_DELVLAN:
		cmdmap = RTM_DELLINK;
		break;
	}

	if (!skip_processing) {
		struct bridge_vlan_info *tmp_last = vinfo_last;

		/* br_process_vlan_info may overwrite vinfo_last */
		err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
					   &changed, extack);

		/* notify first if anything changed */
		if (changed)
			br_ifinfo_notify(cmdmap, br, p);

		if (err)
			return err;
	}

	/* deal with options */
	if (cmd == RTM_NEWVLAN) {
		struct net_bridge_vlan *range_start, *range_end;

		if (vinfo_last) {
			range_start = br_vlan_find(vg, vinfo_last->vid);
			range_end = br_vlan_find(vg, vinfo->vid);
		} else {
			range_start = br_vlan_find(vg, vinfo->vid);
			range_end = range_start;
		}

		err = br_vlan_process_options(br, p, range_start, range_end,
					      tb, extack);
	}

	return err;
}

static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_vlan_msg *bvm;
	struct net_device *dev;
	struct nlattr *attr;
	int err, vlans = 0;
	int rem;

	/* this should validate the header and check for remaining bytes */
	err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
			  extack);
	if (err < 0)
		return err;

	bvm = nlmsg_data(nlh);
	dev = __dev_get_by_index(net, bvm->ifindex);
	if (!dev)
		return -ENODEV;

	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
		return -EINVAL;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
		if (nla_type(attr) != BRIDGE_VLANDB_ENTRY)
			continue;

		vlans++;
		err = br_vlan_rtm_process_one(dev, attr, nlh->nlmsg_type,
					      extack);
		if (err)
			break;
	}
	if (!vlans) {
		NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
		err = -EINVAL;
	}

	return err;
}

void br_vlan_rtnl_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
			     br_vlan_rtm_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
			     br_vlan_rtm_process, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
			     br_vlan_rtm_process, NULL, 0);
}

void br_vlan_rtnl_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
	rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
}