1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge netlink control interface
4 *
5 * Authors:
6 * Stephen Hemminger <shemminger@osdl.org>
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/slab.h>
11 #include <linux/etherdevice.h>
12 #include <net/rtnetlink.h>
13 #include <net/net_namespace.h>
14 #include <net/sock.h>
15 #include <uapi/linux/if_bridge.h>
16
17 #include "br_private.h"
18 #include "br_private_stp.h"
19 #include "br_private_tunnel.h"
20
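/* Count how many bridge_vlan_info entries a compressed VLAN dump needs:
 * consecutive VIDs with identical flags are merged into a range (two
 * entries), everything else costs one entry.  Returns 0 unless the
 * RTEXT_FILTER_BRVLAN_COMPRESSED filter was requested.
 */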
21 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
22 u32 filter_mask)
23 {
24 struct net_bridge_vlan *v;
25 u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
26 u16 flags, pvid;
27 int num_vlans = 0;
28
29 if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
30 return 0;
31
32 pvid = br_get_pvid(vg);
33 /* Count number of vlan infos */
34 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
35 flags = 0;
36 /* only a context, bridge vlan not activated */
37 if (!br_vlan_should_use(v))
38 continue;
39 if (v->vid == pvid)
40 flags |= BRIDGE_VLAN_INFO_PVID;
41
42 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
43 flags |= BRIDGE_VLAN_INFO_UNTAGGED;
44
45 if (vid_range_start == 0) {
46 goto initvars;
47 } else if ((v->vid - vid_range_end) == 1 &&
48 flags == vid_range_flags) {
49 vid_range_end = v->vid;
50 continue;
51 } else {
52 if ((vid_range_end - vid_range_start) > 0)
53 num_vlans += 2;
54 else
55 num_vlans += 1;
56 }
57 initvars:
58 vid_range_start = v->vid;
59 vid_range_end = v->vid;
60 vid_range_flags = flags;
61 }
62
63 if (vid_range_start != 0) {
64 if ((vid_range_end - vid_range_start) > 0)
65 num_vlans += 2;
66 else
67 num_vlans += 1;
68 }
69
70 return num_vlans;
71 }
72
73 static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
74 u32 filter_mask)
75 {
76 int num_vlans;
77
78 if (!vg)
79 return 0;
80
81 if (filter_mask & RTEXT_FILTER_BRVLAN)
82 return vg->num_vlans;
83
84 rcu_read_lock();
85 num_vlans = __get_num_vlan_infos(vg, filter_mask);
86 rcu_read_unlock();
87
88 return num_vlans;
89 }
90
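/* Size the IFLA_AF_SPEC payload for @dev.  The VLAN group is looked up
 * under RCU from either the port (bridge port device) or the bridge
 * itself, and tunnel info is added for ports with BR_VLAN_TUNNEL set.
 */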
91 static size_t br_get_link_af_size_filtered(const struct net_device *dev,
92 u32 filter_mask)
93 {
94 struct net_bridge_vlan_group *vg = NULL;
95 struct net_bridge_port *p = NULL;
96 struct net_bridge *br;
97 int num_vlan_infos;
98 size_t vinfo_sz = 0;
99
100 rcu_read_lock();
101 if (netif_is_bridge_port(dev)) {
102 p = br_port_get_check_rcu(dev);
103 if (p)
104 vg = nbp_vlan_group_rcu(p);
105 } else if (dev->priv_flags & IFF_EBRIDGE) {
106 br = netdev_priv(dev);
107 vg = br_vlan_group_rcu(br);
108 }
109 num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
110 rcu_read_unlock();
111
112 if (p && (p->flags & BR_VLAN_TUNNEL))
113 vinfo_sz += br_get_vlan_tunnel_info_size(vg);
114
115 /* Each VLAN is returned in bridge_vlan_info along with flags */
116 vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
117
118 return vinfo_sz;
119 }
120
121 static inline size_t br_port_info_size(void)
122 {
123 return nla_total_size(1) /* IFLA_BRPORT_STATE */
124 + nla_total_size(2) /* IFLA_BRPORT_PRIORITY */
125 + nla_total_size(4) /* IFLA_BRPORT_COST */
126 + nla_total_size(1) /* IFLA_BRPORT_MODE */
127 + nla_total_size(1) /* IFLA_BRPORT_GUARD */
128 + nla_total_size(1) /* IFLA_BRPORT_PROTECT */
129 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
130 + nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */
131 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
132 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
133 + nla_total_size(1) /* IFLA_BRPORT_MCAST_FLOOD */
134 + nla_total_size(1) /* IFLA_BRPORT_BCAST_FLOOD */
135 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
136 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
137 + nla_total_size(1) /* IFLA_BRPORT_VLAN_TUNNEL */
138 + nla_total_size(1) /* IFLA_BRPORT_NEIGH_SUPPRESS */
139 + nla_total_size(1) /* IFLA_BRPORT_ISOLATED */
140 + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
141 + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
142 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_PORT */
143 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_DESIGNATED_COST */
144 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_ID */
145 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_NO */
146 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
147 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_CONFIG_PENDING */
148 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
149 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
150 + nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
151 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
152 + nla_total_size(sizeof(u8)) /* IFLA_BRPORT_MULTICAST_ROUTER */
153 #endif
154 + nla_total_size(sizeof(u16)) /* IFLA_BRPORT_GROUP_FWD_MASK */
155 + 0;
156 }
157
158 static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
159 {
160 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
161 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
162 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
163 + nla_total_size(4) /* IFLA_MASTER */
164 + nla_total_size(4) /* IFLA_MTU */
165 + nla_total_size(4) /* IFLA_LINK */
166 + nla_total_size(1) /* IFLA_OPERSTATE */
167 + nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
168 + nla_total_size(br_get_link_af_size_filtered(dev,
169 filter_mask)) /* IFLA_AF_SPEC */
170 + nla_total_size(4); /* IFLA_BRPORT_BACKUP_PORT */
171 }
172
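/* Fill all IFLA_BRPORT_* attributes for one bridge port into @skb. */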
173 static int br_port_fill_attrs(struct sk_buff *skb,
174 const struct net_bridge_port *p)
175 {
176 u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
177 struct net_bridge_port *backup_p;
178 u64 timerval;
179
180 if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
181 nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
182 nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
183 nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
184 nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
185 nla_put_u8(skb, IFLA_BRPORT_PROTECT,
186 !!(p->flags & BR_ROOT_BLOCK)) ||
187 nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
188 !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
189 nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
190 !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
191 nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
192 nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
193 !!(p->flags & BR_FLOOD)) ||
194 nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
195 !!(p->flags & BR_MCAST_FLOOD)) ||
196 nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
197 !!(p->flags & BR_BCAST_FLOOD)) ||
198 nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
199 nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
200 !!(p->flags & BR_PROXYARP_WIFI)) ||
201 nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
202 &p->designated_root) ||
203 nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
204 &p->designated_bridge) ||
205 nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
206 nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
207 nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
208 nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
209 nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
210 p->topology_change_ack) ||
211 nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
212 nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
213 BR_VLAN_TUNNEL)) ||
214 nla_put_u16(skb, IFLA_BRPORT_GROUP_FWD_MASK, p->group_fwd_mask) ||
215 nla_put_u8(skb, IFLA_BRPORT_NEIGH_SUPPRESS,
216 !!(p->flags & BR_NEIGH_SUPPRESS)) ||
217 nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
218 return -EMSGSIZE;
219
220 timerval = br_timer_value(&p->message_age_timer);
221 if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
222 IFLA_BRPORT_PAD))
223 return -EMSGSIZE;
224 timerval = br_timer_value(&p->forward_delay_timer);
225 if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
226 IFLA_BRPORT_PAD))
227 return -EMSGSIZE;
228 timerval = br_timer_value(&p->hold_timer);
229 if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
230 IFLA_BRPORT_PAD))
231 return -EMSGSIZE;
232
233 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
234 if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
235 p->multicast_router))
236 return -EMSGSIZE;
237 #endif
238
239 /* we might be called only with br->lock, so take the RCU read lock */
240 rcu_read_lock();
241 backup_p = rcu_dereference(p->backup_port);
242 if (backup_p)
243 nla_put_u32(skb, IFLA_BRPORT_BACKUP_PORT,
244 backup_p->dev->ifindex);
245 rcu_read_unlock();
246
247 return 0;
248 }
249
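/* Emit either a single IFLA_BRIDGE_VLAN_INFO attribute or, for a real
 * range, a RANGE_BEGIN/RANGE_END pair sharing the same flags.
 */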
250 static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
251 u16 vid_end, u16 flags)
252 {
253 struct bridge_vlan_info vinfo;
254
255 if ((vid_end - vid_start) > 0) {
256 /* add range to skb */
257 vinfo.vid = vid_start;
258 vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
259 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
260 sizeof(vinfo), &vinfo))
261 goto nla_put_failure;
262
263 vinfo.vid = vid_end;
264 vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
265 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
266 sizeof(vinfo), &vinfo))
267 goto nla_put_failure;
268 } else {
269 vinfo.vid = vid_start;
270 vinfo.flags = flags;
271 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
272 sizeof(vinfo), &vinfo))
273 goto nla_put_failure;
274 }
275
276 return 0;
277
278 nla_put_failure:
279 return -EMSGSIZE;
280 }
281
282 static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
283 struct net_bridge_vlan_group *vg)
284 {
285 struct net_bridge_vlan *v;
286 u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
287 u16 flags, pvid;
288 int err = 0;
289
290 /* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
291 * and mark vlan info with begin and end flags
292 * if vlaninfo represents a range
293 */
294 pvid = br_get_pvid(vg);
295 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
296 flags = 0;
297 if (!br_vlan_should_use(v))
298 continue;
299 if (v->vid == pvid)
300 flags |= BRIDGE_VLAN_INFO_PVID;
301
302 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
303 flags |= BRIDGE_VLAN_INFO_UNTAGGED;
304
305 if (vid_range_start == 0) {
306 goto initvars;
307 } else if ((v->vid - vid_range_end) == 1 &&
308 flags == vid_range_flags) {
309 vid_range_end = v->vid;
310 continue;
311 } else {
312 err = br_fill_ifvlaninfo_range(skb, vid_range_start,
313 vid_range_end,
314 vid_range_flags);
315 if (err)
316 return err;
317 }
318
319 initvars:
320 vid_range_start = v->vid;
321 vid_range_end = v->vid;
322 vid_range_flags = flags;
323 }
324
325 if (vid_range_start != 0) {
326 /* Call it once more to send any leftover VLANs */
327 err = br_fill_ifvlaninfo_range(skb, vid_range_start,
328 vid_range_end,
329 vid_range_flags);
330 if (err)
331 return err;
332 }
333
334 return 0;
335 }
336
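/* Uncompressed dump: one IFLA_BRIDGE_VLAN_INFO attribute per VLAN. */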
337 static int br_fill_ifvlaninfo(struct sk_buff *skb,
338 struct net_bridge_vlan_group *vg)
339 {
340 struct bridge_vlan_info vinfo;
341 struct net_bridge_vlan *v;
342 u16 pvid;
343
344 pvid = br_get_pvid(vg);
345 list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
346 if (!br_vlan_should_use(v))
347 continue;
348
349 vinfo.vid = v->vid;
350 vinfo.flags = 0;
351 if (v->vid == pvid)
352 vinfo.flags |= BRIDGE_VLAN_INFO_PVID;
353
354 if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
355 vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
356
357 if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
358 sizeof(vinfo), &vinfo))
359 goto nla_put_failure;
360 }
361
362 return 0;
363
364 nla_put_failure:
365 return -EMSGSIZE;
366 }
367
368 /*
369 * Create one netlink message for one interface
370 * Contains port and master info as well as carrier and bridge state.
371 */
372 static int br_fill_ifinfo(struct sk_buff *skb,
373 const struct net_bridge_port *port,
374 u32 pid, u32 seq, int event, unsigned int flags,
375 u32 filter_mask, const struct net_device *dev)
376 {
377 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
378 struct net_bridge *br;
379 struct ifinfomsg *hdr;
380 struct nlmsghdr *nlh;
381
382 if (port)
383 br = port->br;
384 else
385 br = netdev_priv(dev);
386
387 br_debug(br, "br_fill_info event %d port %s master %s\n",
388 event, dev->name, br->dev->name);
389
390 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
391 if (nlh == NULL)
392 return -EMSGSIZE;
393
394 hdr = nlmsg_data(nlh);
395 hdr->ifi_family = AF_BRIDGE;
396 hdr->__ifi_pad = 0;
397 hdr->ifi_type = dev->type;
398 hdr->ifi_index = dev->ifindex;
399 hdr->ifi_flags = dev_get_flags(dev);
400 hdr->ifi_change = 0;
401
402 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
403 nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
404 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
405 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
406 (dev->addr_len &&
407 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
408 (dev->ifindex != dev_get_iflink(dev) &&
409 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
410 goto nla_put_failure;
411
412 if (event == RTM_NEWLINK && port) {
413 struct nlattr *nest;
414
415 nest = nla_nest_start(skb, IFLA_PROTINFO);
416 if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
417 goto nla_put_failure;
418 nla_nest_end(skb, nest);
419 }
420
421 /* Check if the VID information is requested */
422 if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
423 (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
424 struct net_bridge_vlan_group *vg;
425 struct nlattr *af;
426 int err;
427
428 /* RCU needed because of the VLAN locking rules (rcu || rtnl) */
429 rcu_read_lock();
430 if (port)
431 vg = nbp_vlan_group_rcu(port);
432 else
433 vg = br_vlan_group_rcu(br);
434
435 if (!vg || !vg->num_vlans) {
436 rcu_read_unlock();
437 goto done;
438 }
439 af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
440 if (!af) {
441 rcu_read_unlock();
442 goto nla_put_failure;
443 }
444 if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
445 err = br_fill_ifvlaninfo_compressed(skb, vg);
446 else
447 err = br_fill_ifvlaninfo(skb, vg);
448
449 if (port && (port->flags & BR_VLAN_TUNNEL))
450 err = br_fill_vlan_tunnel_info(skb, vg);
451 rcu_read_unlock();
452 if (err)
453 goto nla_put_failure;
454 nla_nest_end(skb, af);
455 }
456
457 done:
458 nlmsg_end(skb, nlh);
459 return 0;
460
461 nla_put_failure:
462 nlmsg_cancel(skb, nlh);
463 return -EMSGSIZE;
464 }
465
466 /* Notify listeners of a change in bridge or port information */
467 void br_ifinfo_notify(int event, const struct net_bridge *br,
468 const struct net_bridge_port *port)
469 {
470 u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
471 struct net_device *dev;
472 struct sk_buff *skb;
473 int err = -ENOBUFS;
474 struct net *net;
475 u16 port_no = 0;
476
477 if (WARN_ON(!port && !br))
478 return;
479
480 if (port) {
481 dev = port->dev;
482 br = port->br;
483 port_no = port->port_no;
484 } else {
485 dev = br->dev;
486 }
487
488 net = dev_net(dev);
489 br_debug(br, "port %u(%s) event %d\n", port_no, dev->name, event);
490
491 skb = nlmsg_new(br_nlmsg_size(dev, filter), GFP_ATOMIC);
492 if (skb == NULL)
493 goto errout;
494
495 err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, dev);
496 if (err < 0) {
497 /* -EMSGSIZE implies BUG in br_nlmsg_size() */
498 WARN_ON(err == -EMSGSIZE);
499 kfree_skb(skb);
500 goto errout;
501 }
502 rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
503 return;
504 errout:
505 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
506 }
507
508 /*
509 * Dump information about all ports, in response to GETLINK
510 */
511 int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
512 struct net_device *dev, u32 filter_mask, int nlflags)
513 {
514 struct net_bridge_port *port = br_port_get_rtnl(dev);
515
516 if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
517 !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
518 return 0;
519
520 return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
521 filter_mask, dev);
522 }
523
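/* Apply a single bridge_vlan_info entry: add the VLAN on the port or on
 * the bridge itself for RTM_SETLINK, delete it for RTM_DELLINK.
 * @changed is set to true when the configuration was actually modified.
 */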
524 static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
525 int cmd, struct bridge_vlan_info *vinfo, bool *changed,
526 struct netlink_ext_ack *extack)
527 {
528 bool curr_change;
529 int err = 0;
530
531 switch (cmd) {
532 case RTM_SETLINK:
533 if (p) {
534 /* if the MASTER flag is set this will act on the global
535 * per-VLAN entry as well
536 */
537 err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
538 &curr_change, extack);
539 } else {
540 vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
541 err = br_vlan_add(br, vinfo->vid, vinfo->flags,
542 &curr_change, extack);
543 }
544 if (curr_change)
545 *changed = true;
546 break;
547
548 case RTM_DELLINK:
549 if (p) {
550 if (!nbp_vlan_delete(p, vinfo->vid))
551 *changed = true;
552
553 if ((vinfo->flags & BRIDGE_VLAN_INFO_MASTER) &&
554 !br_vlan_delete(p->br, vinfo->vid))
555 *changed = true;
556 } else if (!br_vlan_delete(br, vinfo->vid)) {
557 *changed = true;
558 }
559 break;
560 }
561
562 return err;
563 }
564
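/* Validate one bridge_vlan_info entry and handle VLAN ranges: a
 * RANGE_BEGIN entry is buffered in @vinfo_last (a PVID range is
 * rejected) and the matching RANGE_END expands into per-VID
 * br_vlan_info() calls.
 */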
565 static int br_process_vlan_info(struct net_bridge *br,
566 struct net_bridge_port *p, int cmd,
567 struct bridge_vlan_info *vinfo_curr,
568 struct bridge_vlan_info **vinfo_last,
569 bool *changed,
570 struct netlink_ext_ack *extack)
571 {
572 if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
573 return -EINVAL;
574
575 if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
576 /* check if we are already processing a range */
577 if (*vinfo_last)
578 return -EINVAL;
579 *vinfo_last = vinfo_curr;
580 /* don't allow range of pvids */
581 if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
582 return -EINVAL;
583 return 0;
584 }
585
586 if (*vinfo_last) {
587 struct bridge_vlan_info tmp_vinfo;
588 int v, err;
589
590 if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
591 return -EINVAL;
592
593 if (vinfo_curr->vid <= (*vinfo_last)->vid)
594 return -EINVAL;
595
596 memcpy(&tmp_vinfo, *vinfo_last,
597 sizeof(struct bridge_vlan_info));
598 for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
599 tmp_vinfo.vid = v;
600 err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed,
601 extack);
602 if (err)
603 break;
604 }
605 *vinfo_last = NULL;
606
607 return err;
608 }
609
610 return br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
611 }
612
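/* Walk the nested IFLA_AF_SPEC attributes for RTM_SETLINK/RTM_DELLINK
 * and dispatch IFLA_BRIDGE_VLAN_INFO and IFLA_BRIDGE_VLAN_TUNNEL_INFO
 * entries.  A VLAN range arrives as two consecutive entries, e.g.
 * (illustrative layout):
 *
 *   IFLA_AF_SPEC
 *     IFLA_BRIDGE_VLAN_INFO { .vid = 10, .flags = ..._RANGE_BEGIN }
 *     IFLA_BRIDGE_VLAN_INFO { .vid = 20, .flags = ..._RANGE_END }
 *
 * Tunnel entries are only accepted on ports with BR_VLAN_TUNNEL set.
 */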
613 static int br_afspec(struct net_bridge *br,
614 struct net_bridge_port *p,
615 struct nlattr *af_spec,
616 int cmd, bool *changed,
617 struct netlink_ext_ack *extack)
618 {
619 struct bridge_vlan_info *vinfo_curr = NULL;
620 struct bridge_vlan_info *vinfo_last = NULL;
621 struct nlattr *attr;
622 struct vtunnel_info tinfo_last = {};
623 struct vtunnel_info tinfo_curr = {};
624 int err = 0, rem;
625
626 nla_for_each_nested(attr, af_spec, rem) {
627 err = 0;
628 switch (nla_type(attr)) {
629 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
630 if (!p || !(p->flags & BR_VLAN_TUNNEL))
631 return -EINVAL;
632 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
633 if (err)
634 return err;
635 err = br_process_vlan_tunnel_info(br, p, cmd,
636 &tinfo_curr,
637 &tinfo_last,
638 changed);
639 if (err)
640 return err;
641 break;
642 case IFLA_BRIDGE_VLAN_INFO:
643 if (nla_len(attr) != sizeof(struct bridge_vlan_info))
644 return -EINVAL;
645 vinfo_curr = nla_data(attr);
646 err = br_process_vlan_info(br, p, cmd, vinfo_curr,
647 &vinfo_last, changed,
648 extack);
649 if (err)
650 return err;
651 break;
652 }
653 }
654
655 return err;
656 }
657
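/* Per-port attribute policy.  Example (illustrative): userspace toggles
 * these options via RTM_SETLINK with a nested IFLA_PROTINFO, e.g.
 *
 *   bridge link set dev eth0 learning off
 *
 * or through the rtnl_link slave path:
 *
 *   ip link set dev eth0 type bridge_slave learning off
 */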
658 static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
659 [IFLA_BRPORT_STATE] = { .type = NLA_U8 },
660 [IFLA_BRPORT_COST] = { .type = NLA_U32 },
661 [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
662 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
663 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
664 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
665 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
666 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
667 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
668 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
669 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
670 [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
671 [IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
672 [IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
673 [IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
674 [IFLA_BRPORT_VLAN_TUNNEL] = { .type = NLA_U8 },
675 [IFLA_BRPORT_GROUP_FWD_MASK] = { .type = NLA_U16 },
676 [IFLA_BRPORT_NEIGH_SUPPRESS] = { .type = NLA_U8 },
677 [IFLA_BRPORT_ISOLATED] = { .type = NLA_U8 },
678 [IFLA_BRPORT_BACKUP_PORT] = { .type = NLA_U32 },
679 };
680
681 /* Change the state of the port and notify spanning tree */
682 static int br_set_port_state(struct net_bridge_port *p, u8 state)
683 {
684 if (state > BR_STATE_BLOCKING)
685 return -EINVAL;
686
687 /* if kernel STP is running, don't allow changes */
688 if (p->br->stp_enabled == BR_KERNEL_STP)
689 return -EBUSY;
690
691 /* if device is not up, change is not allowed
692 * if link is not present, only allowable state is disabled
693 */
694 if (!netif_running(p->dev) ||
695 (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
696 return -ENETDOWN;
697
698 br_set_state(p, state);
699 br_port_state_selection(p->br);
700 return 0;
701 }
702
703 /* Set or clear port flags based on attribute */
704 static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
705 int attrtype, unsigned long mask)
706 {
707 unsigned long flags;
708 int err;
709
710 if (!tb[attrtype])
711 return 0;
712
713 if (nla_get_u8(tb[attrtype]))
714 flags = p->flags | mask;
715 else
716 flags = p->flags & ~mask;
717
718 err = br_switchdev_set_port_flag(p, flags, mask);
719 if (err)
720 return err;
721
722 p->flags = flags;
723 return 0;
724 }
725
726 /* Process bridge protocol info on port */
727 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
728 {
729 unsigned long old_flags = p->flags;
730 bool br_vlan_tunnel_old = false;
731 int err;
732
733 err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
734 if (err)
735 return err;
736
737 err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
738 if (err)
739 return err;
740
741 err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
742 if (err)
743 return err;
744
745 err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
746 if (err)
747 return err;
748
749 err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
750 if (err)
751 return err;
752
753 err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
754 if (err)
755 return err;
756
757 err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
758 if (err)
759 return err;
760
761 err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
762 if (err)
763 return err;
764
765 err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
766 if (err)
767 return err;
768
769 err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
770 if (err)
771 return err;
772
773 err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
774 if (err)
775 return err;
776
777 br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
778 err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
779 if (err)
780 return err;
781
782 if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
783 nbp_vlan_tunnel_info_flush(p);
784
785 if (tb[IFLA_BRPORT_COST]) {
786 err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
787 if (err)
788 return err;
789 }
790
791 if (tb[IFLA_BRPORT_PRIORITY]) {
792 err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
793 if (err)
794 return err;
795 }
796
797 if (tb[IFLA_BRPORT_STATE]) {
798 err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
799 if (err)
800 return err;
801 }
802
803 if (tb[IFLA_BRPORT_FLUSH])
804 br_fdb_delete_by_port(p->br, p, 0, 0);
805
806 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
807 if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
808 u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);
809
810 err = br_multicast_set_port_router(p, mcast_router);
811 if (err)
812 return err;
813 }
814 #endif
815
816 if (tb[IFLA_BRPORT_GROUP_FWD_MASK]) {
817 u16 fwd_mask = nla_get_u16(tb[IFLA_BRPORT_GROUP_FWD_MASK]);
818
819 if (fwd_mask & BR_GROUPFWD_MACPAUSE)
820 return -EINVAL;
821 p->group_fwd_mask = fwd_mask;
822 }
823
824 err = br_set_port_flag(p, tb, IFLA_BRPORT_NEIGH_SUPPRESS,
825 BR_NEIGH_SUPPRESS);
826 if (err)
827 return err;
828
829 err = br_set_port_flag(p, tb, IFLA_BRPORT_ISOLATED, BR_ISOLATED);
830 if (err)
831 return err;
832
833 if (tb[IFLA_BRPORT_BACKUP_PORT]) {
834 struct net_device *backup_dev = NULL;
835 u32 backup_ifindex;
836
837 backup_ifindex = nla_get_u32(tb[IFLA_BRPORT_BACKUP_PORT]);
838 if (backup_ifindex) {
839 backup_dev = __dev_get_by_index(dev_net(p->dev),
840 backup_ifindex);
841 if (!backup_dev)
842 return -ENOENT;
843 }
844
845 err = nbp_backup_change(p, backup_dev);
846 if (err)
847 return err;
848 }
849
850 br_port_flags_change(p, old_flags ^ p->flags);
851 return 0;
852 }
853
854 /* Change state and parameters on port. */
855 int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
856 struct netlink_ext_ack *extack)
857 {
858 struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
859 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
860 struct net_bridge_port *p;
861 struct nlattr *protinfo;
862 struct nlattr *afspec;
863 bool changed = false;
864 int err = 0;
865
866 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
867 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
868 if (!protinfo && !afspec)
869 return 0;
870
871 p = br_port_get_rtnl(dev);
872 /* We also want to accept dev as the bridge itself when AF_SPEC
873 * is present, in case someone is setting VLAN info on the bridge
874 */
875 if (!p && !afspec)
876 return -EINVAL;
877
878 if (p && protinfo) {
879 if (protinfo->nla_type & NLA_F_NESTED) {
880 err = nla_parse_nested_deprecated(tb, IFLA_BRPORT_MAX,
881 protinfo,
882 br_port_policy,
883 NULL);
884 if (err)
885 return err;
886
887 spin_lock_bh(&p->br->lock);
888 err = br_setport(p, tb);
889 spin_unlock_bh(&p->br->lock);
890 } else {
891 /* Binary compatibility with old RSTP */
892 if (nla_len(protinfo) < sizeof(u8))
893 return -EINVAL;
894
895 spin_lock_bh(&p->br->lock);
896 err = br_set_port_state(p, nla_get_u8(protinfo));
897 spin_unlock_bh(&p->br->lock);
898 }
899 if (err)
900 goto out;
901 changed = true;
902 }
903
904 if (afspec)
905 err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
906
907 if (changed)
908 br_ifinfo_notify(RTM_NEWLINK, br, p);
909 out:
910 return err;
911 }
912
913 /* Delete port information */
914 int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
915 {
916 struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
917 struct net_bridge_port *p;
918 struct nlattr *afspec;
919 bool changed = false;
920 int err = 0;
921
922 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
923 if (!afspec)
924 return 0;
925
926 p = br_port_get_rtnl(dev);
927 /* We want to accept dev as bridge itself as well */
928 if (!p && !(dev->priv_flags & IFF_EBRIDGE))
929 return -EINVAL;
930
931 err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
932 if (changed)
933 /* Send RTM_NEWLINK because userspace
934 * expects RTM_NEWLINK for vlan dels
935 */
936 br_ifinfo_notify(RTM_NEWLINK, br, p);
937
938 return err;
939 }
940
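/* Validate link-level attributes at bridge create/change time: the MAC
 * address must be a valid unicast Ethernet address, the VLAN protocol
 * must be 802.1Q or 802.1ad and the default PVID must be a valid VID.
 */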
941 static int br_validate(struct nlattr *tb[], struct nlattr *data[],
942 struct netlink_ext_ack *extack)
943 {
944 if (tb[IFLA_ADDRESS]) {
945 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
946 return -EINVAL;
947 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
948 return -EADDRNOTAVAIL;
949 }
950
951 if (!data)
952 return 0;
953
954 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
955 if (data[IFLA_BR_VLAN_PROTOCOL]) {
956 switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
957 case htons(ETH_P_8021Q):
958 case htons(ETH_P_8021AD):
959 break;
960 default:
961 return -EPROTONOSUPPORT;
962 }
963 }
964
965 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
966 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
967
968 if (defpvid >= VLAN_VID_MASK)
969 return -EINVAL;
970 }
971 #endif
972
973 return 0;
974 }
975
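/* rtnl_link slave_changelink: apply IFLA_BRPORT_* options passed via
 * "ip link set ... type bridge_slave ..." under the bridge lock.
 */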
976 static int br_port_slave_changelink(struct net_device *brdev,
977 struct net_device *dev,
978 struct nlattr *tb[],
979 struct nlattr *data[],
980 struct netlink_ext_ack *extack)
981 {
982 struct net_bridge *br = netdev_priv(brdev);
983 int ret;
984
985 if (!data)
986 return 0;
987
988 spin_lock_bh(&br->lock);
989 ret = br_setport(br_port_get_rtnl(dev), data);
990 spin_unlock_bh(&br->lock);
991
992 return ret;
993 }
994
995 static int br_port_fill_slave_info(struct sk_buff *skb,
996 const struct net_device *brdev,
997 const struct net_device *dev)
998 {
999 return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
1000 }
1001
1002 static size_t br_port_get_slave_size(const struct net_device *brdev,
1003 const struct net_device *dev)
1004 {
1005 return br_port_info_size();
1006 }
1007
1008 static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
1009 [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
1010 [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
1011 [IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
1012 [IFLA_BR_AGEING_TIME] = { .type = NLA_U32 },
1013 [IFLA_BR_STP_STATE] = { .type = NLA_U32 },
1014 [IFLA_BR_PRIORITY] = { .type = NLA_U16 },
1015 [IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
1016 [IFLA_BR_VLAN_PROTOCOL] = { .type = NLA_U16 },
1017 [IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
1018 [IFLA_BR_GROUP_ADDR] = { .type = NLA_BINARY,
1019 .len = ETH_ALEN },
1020 [IFLA_BR_MCAST_ROUTER] = { .type = NLA_U8 },
1021 [IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
1022 [IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
1023 [IFLA_BR_MCAST_QUERIER] = { .type = NLA_U8 },
1024 [IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
1025 [IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
1026 [IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
1027 [IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
1028 [IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
1029 [IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
1030 [IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
1031 [IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
1032 [IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
1033 [IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
1034 [IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
1035 [IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
1036 [IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
1037 [IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
1038 [IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
1039 [IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
1040 [IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
1041 [IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
1042 [IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
1043 [IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
1044 .len = sizeof(struct br_boolopt_multi) },
1045 };
1046
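/* Apply IFLA_BR_* attributes to the bridge.  Called both for
 * "ip link set" (changelink) and from br_dev_newlink(); processing
 * stops at the first error, so earlier attributes may already have
 * taken effect.
 */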
1047 static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1048 struct nlattr *data[],
1049 struct netlink_ext_ack *extack)
1050 {
1051 struct net_bridge *br = netdev_priv(brdev);
1052 int err;
1053
1054 if (!data)
1055 return 0;
1056
1057 if (data[IFLA_BR_FORWARD_DELAY]) {
1058 err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
1059 if (err)
1060 return err;
1061 }
1062
1063 if (data[IFLA_BR_HELLO_TIME]) {
1064 err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
1065 if (err)
1066 return err;
1067 }
1068
1069 if (data[IFLA_BR_MAX_AGE]) {
1070 err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
1071 if (err)
1072 return err;
1073 }
1074
1075 if (data[IFLA_BR_AGEING_TIME]) {
1076 err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
1077 if (err)
1078 return err;
1079 }
1080
1081 if (data[IFLA_BR_STP_STATE]) {
1082 u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);
1083
1084 br_stp_set_enabled(br, stp_enabled);
1085 }
1086
1087 if (data[IFLA_BR_PRIORITY]) {
1088 u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);
1089
1090 br_stp_set_bridge_priority(br, priority);
1091 }
1092
1093 if (data[IFLA_BR_VLAN_FILTERING]) {
1094 u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);
1095
1096 err = __br_vlan_filter_toggle(br, vlan_filter);
1097 if (err)
1098 return err;
1099 }
1100
1101 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1102 if (data[IFLA_BR_VLAN_PROTOCOL]) {
1103 __be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);
1104
1105 err = __br_vlan_set_proto(br, vlan_proto);
1106 if (err)
1107 return err;
1108 }
1109
1110 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
1111 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
1112
1113 err = __br_vlan_set_default_pvid(br, defpvid, extack);
1114 if (err)
1115 return err;
1116 }
1117
1118 if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
1119 __u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);
1120
1121 err = br_vlan_set_stats(br, vlan_stats);
1122 if (err)
1123 return err;
1124 }
1125
1126 if (data[IFLA_BR_VLAN_STATS_PER_PORT]) {
1127 __u8 per_port = nla_get_u8(data[IFLA_BR_VLAN_STATS_PER_PORT]);
1128
1129 err = br_vlan_set_stats_per_port(br, per_port);
1130 if (err)
1131 return err;
1132 }
1133 #endif
1134
1135 if (data[IFLA_BR_GROUP_FWD_MASK]) {
1136 u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);
1137
1138 if (fwd_mask & BR_GROUPFWD_RESTRICTED)
1139 return -EINVAL;
1140 br->group_fwd_mask = fwd_mask;
1141 }
1142
1143 if (data[IFLA_BR_GROUP_ADDR]) {
1144 u8 new_addr[ETH_ALEN];
1145
1146 if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
1147 return -EINVAL;
1148 memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
1149 if (!is_link_local_ether_addr(new_addr))
1150 return -EINVAL;
1151 if (new_addr[5] == 1 || /* 802.3x Pause address */
1152 new_addr[5] == 2 || /* 802.3ad Slow protocols */
1153 new_addr[5] == 3) /* 802.1X PAE address */
1154 return -EINVAL;
1155 spin_lock_bh(&br->lock);
1156 memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
1157 spin_unlock_bh(&br->lock);
1158 br_opt_toggle(br, BROPT_GROUP_ADDR_SET, true);
1159 br_recalculate_fwd_mask(br);
1160 }
1161
1162 if (data[IFLA_BR_FDB_FLUSH])
1163 br_fdb_flush(br);
1164
1165 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1166 if (data[IFLA_BR_MCAST_ROUTER]) {
1167 u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);
1168
1169 err = br_multicast_set_router(br, multicast_router);
1170 if (err)
1171 return err;
1172 }
1173
1174 if (data[IFLA_BR_MCAST_SNOOPING]) {
1175 u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
1176
1177 br_multicast_toggle(br, mcast_snooping);
1178 }
1179
1180 if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
1181 u8 val;
1182
1183 val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
1184 br_opt_toggle(br, BROPT_MULTICAST_QUERY_USE_IFADDR, !!val);
1185 }
1186
1187 if (data[IFLA_BR_MCAST_QUERIER]) {
1188 u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);
1189
1190 err = br_multicast_set_querier(br, mcast_querier);
1191 if (err)
1192 return err;
1193 }
1194
1195 if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
1196 br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
1197 RHT_ELASTICITY);
1198
1199 if (data[IFLA_BR_MCAST_HASH_MAX])
1200 br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
1201
1202 if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
1203 u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
1204
1205 br->multicast_last_member_count = val;
1206 }
1207
1208 if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
1209 u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);
1210
1211 br->multicast_startup_query_count = val;
1212 }
1213
1214 if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
1215 u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);
1216
1217 br->multicast_last_member_interval = clock_t_to_jiffies(val);
1218 }
1219
1220 if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
1221 u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);
1222
1223 br->multicast_membership_interval = clock_t_to_jiffies(val);
1224 }
1225
1226 if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
1227 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);
1228
1229 br->multicast_querier_interval = clock_t_to_jiffies(val);
1230 }
1231
1232 if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
1233 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);
1234
1235 br->multicast_query_interval = clock_t_to_jiffies(val);
1236 }
1237
1238 if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
1239 u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);
1240
1241 br->multicast_query_response_interval = clock_t_to_jiffies(val);
1242 }
1243
1244 if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
1245 u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);
1246
1247 br->multicast_startup_query_interval = clock_t_to_jiffies(val);
1248 }
1249
1250 if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
1251 __u8 mcast_stats;
1252
1253 mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
1254 br_opt_toggle(br, BROPT_MULTICAST_STATS_ENABLED, !!mcast_stats);
1255 }
1256
1257 if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
1258 __u8 igmp_version;
1259
1260 igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
1261 err = br_multicast_set_igmp_version(br, igmp_version);
1262 if (err)
1263 return err;
1264 }
1265
1266 #if IS_ENABLED(CONFIG_IPV6)
1267 if (data[IFLA_BR_MCAST_MLD_VERSION]) {
1268 __u8 mld_version;
1269
1270 mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
1271 err = br_multicast_set_mld_version(br, mld_version);
1272 if (err)
1273 return err;
1274 }
1275 #endif
1276 #endif
1277 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1278 if (data[IFLA_BR_NF_CALL_IPTABLES]) {
1279 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);
1280
1281 br_opt_toggle(br, BROPT_NF_CALL_IPTABLES, !!val);
1282 }
1283
1284 if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
1285 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);
1286
1287 br_opt_toggle(br, BROPT_NF_CALL_IP6TABLES, !!val);
1288 }
1289
1290 if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
1291 u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);
1292
1293 br_opt_toggle(br, BROPT_NF_CALL_ARPTABLES, !!val);
1294 }
1295 #endif
1296
1297 if (data[IFLA_BR_MULTI_BOOLOPT]) {
1298 struct br_boolopt_multi *bm;
1299
1300 bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
1301 err = br_boolopt_multi_toggle(br, bm, extack);
1302 if (err)
1303 return err;
1304 }
1305
1306 return 0;
1307 }
1308
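/* rtnl_link newlink: register the bridge device, apply an optional
 * IFLA_ADDRESS as the bridge id, then run br_changelink(); the device
 * is deleted again if applying the attributes fails.
 */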
1309 static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1310 struct nlattr *tb[], struct nlattr *data[],
1311 struct netlink_ext_ack *extack)
1312 {
1313 struct net_bridge *br = netdev_priv(dev);
1314 int err;
1315
1316 err = register_netdevice(dev);
1317 if (err)
1318 return err;
1319
1320 if (tb[IFLA_ADDRESS]) {
1321 spin_lock_bh(&br->lock);
1322 br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1323 spin_unlock_bh(&br->lock);
1324 }
1325
1326 err = br_changelink(dev, tb, data, extack);
1327 if (err)
1328 br_dev_delete(dev, NULL);
1329
1330 return err;
1331 }
1332
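/* Worst-case size of the IFLA_BR_* attributes emitted by br_fill_info(). */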
1333 static size_t br_get_size(const struct net_device *brdev)
1334 {
1335 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
1336 nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
1337 nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
1338 nla_total_size(sizeof(u32)) + /* IFLA_BR_AGEING_TIME */
1339 nla_total_size(sizeof(u32)) + /* IFLA_BR_STP_STATE */
1340 nla_total_size(sizeof(u16)) + /* IFLA_BR_PRIORITY */
1341 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_FILTERING */
1342 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1343 nla_total_size(sizeof(__be16)) + /* IFLA_BR_VLAN_PROTOCOL */
1344 nla_total_size(sizeof(u16)) + /* IFLA_BR_VLAN_DEFAULT_PVID */
1345 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_ENABLED */
1346 nla_total_size(sizeof(u8)) + /* IFLA_BR_VLAN_STATS_PER_PORT */
1347 #endif
1348 nla_total_size(sizeof(u16)) + /* IFLA_BR_GROUP_FWD_MASK */
1349 nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_ROOT_ID */
1350 nla_total_size(sizeof(struct ifla_bridge_id)) + /* IFLA_BR_BRIDGE_ID */
1351 nla_total_size(sizeof(u16)) + /* IFLA_BR_ROOT_PORT */
1352 nla_total_size(sizeof(u32)) + /* IFLA_BR_ROOT_PATH_COST */
1353 nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE */
1354 nla_total_size(sizeof(u8)) + /* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
1355 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
1356 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
1357 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
1358 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
1359 nla_total_size(ETH_ALEN) + /* IFLA_BR_GROUP_ADDR */
1360 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1361 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_ROUTER */
1362 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_SNOOPING */
1363 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERY_USE_IFADDR */
1364 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_QUERIER */
1365 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_STATS_ENABLED */
1366 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_ELASTICITY */
1367 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_HASH_MAX */
1368 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_LAST_MEMBER_CNT */
1369 nla_total_size(sizeof(u32)) + /* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
1370 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
1371 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
1372 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
1373 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
1374 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
1375 nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
1376 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_IGMP_VERSION */
1377 nla_total_size(sizeof(u8)) + /* IFLA_BR_MCAST_MLD_VERSION */
1378 #endif
1379 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1380 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IPTABLES */
1381 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */
1382 nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */
1383 #endif
1384 nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
1385 0;
1386 }
1387
1388 static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
1389 {
1390 struct net_bridge *br = netdev_priv(brdev);
1391 u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
1392 u32 hello_time = jiffies_to_clock_t(br->hello_time);
1393 u32 age_time = jiffies_to_clock_t(br->max_age);
1394 u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
1395 u32 stp_enabled = br->stp_enabled;
1396 u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
1397 u8 vlan_enabled = br_vlan_enabled(br->dev);
1398 struct br_boolopt_multi bm;
1399 u64 clockval;
1400
1401 clockval = br_timer_value(&br->hello_timer);
1402 if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
1403 return -EMSGSIZE;
1404 clockval = br_timer_value(&br->tcn_timer);
1405 if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
1406 return -EMSGSIZE;
1407 clockval = br_timer_value(&br->topology_change_timer);
1408 if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
1409 IFLA_BR_PAD))
1410 return -EMSGSIZE;
1411 clockval = br_timer_value(&br->gc_work.timer);
1412 if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
1413 return -EMSGSIZE;
1414
1415 br_boolopt_multi_get(br, &bm);
1416 if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
1417 nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
1418 nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
1419 nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
1420 nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
1421 nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
1422 nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
1423 nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
1424 nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
1425 &br->bridge_id) ||
1426 nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
1427 &br->designated_root) ||
1428 nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
1429 nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
1430 nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
1431 nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
1432 br->topology_change_detected) ||
1433 nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
1434 nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
1435 return -EMSGSIZE;
1436
1437 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
1438 if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
1439 nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
1440 nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
1441 br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
1442 nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
1443 br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
1444 return -EMSGSIZE;
1445 #endif
1446 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1447 if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
1448 nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING,
1449 br_opt_get(br, BROPT_MULTICAST_ENABLED)) ||
1450 nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
1451 br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR)) ||
1452 nla_put_u8(skb, IFLA_BR_MCAST_QUERIER,
1453 br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
1454 nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
1455 br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
1456 nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
1457 nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
1458 nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
1459 br->multicast_last_member_count) ||
1460 nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
1461 br->multicast_startup_query_count) ||
1462 nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
1463 br->multicast_igmp_version))
1464 return -EMSGSIZE;
1465 #if IS_ENABLED(CONFIG_IPV6)
1466 if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
1467 br->multicast_mld_version))
1468 return -EMSGSIZE;
1469 #endif
1470 clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
1471 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
1472 IFLA_BR_PAD))
1473 return -EMSGSIZE;
1474 clockval = jiffies_to_clock_t(br->multicast_membership_interval);
1475 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
1476 IFLA_BR_PAD))
1477 return -EMSGSIZE;
1478 clockval = jiffies_to_clock_t(br->multicast_querier_interval);
1479 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
1480 IFLA_BR_PAD))
1481 return -EMSGSIZE;
1482 clockval = jiffies_to_clock_t(br->multicast_query_interval);
1483 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
1484 IFLA_BR_PAD))
1485 return -EMSGSIZE;
1486 clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
1487 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
1488 IFLA_BR_PAD))
1489 return -EMSGSIZE;
1490 clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
1491 if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
1492 IFLA_BR_PAD))
1493 return -EMSGSIZE;
1494 #endif
1495 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
1496 if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
1497 br_opt_get(br, BROPT_NF_CALL_IPTABLES) ? 1 : 0) ||
1498 nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
1499 br_opt_get(br, BROPT_NF_CALL_IP6TABLES) ? 1 : 0) ||
1500 nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
1501 br_opt_get(br, BROPT_NF_CALL_ARPTABLES) ? 1 : 0))
1502 return -EMSGSIZE;
1503 #endif
1504
1505 return 0;
1506 }
1507
1508 static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
1509 {
1510 struct net_bridge_port *p = NULL;
1511 struct net_bridge_vlan_group *vg;
1512 struct net_bridge_vlan *v;
1513 struct net_bridge *br;
1514 int numvls = 0;
1515
1516 switch (attr) {
1517 case IFLA_STATS_LINK_XSTATS:
1518 br = netdev_priv(dev);
1519 vg = br_vlan_group(br);
1520 break;
1521 case IFLA_STATS_LINK_XSTATS_SLAVE:
1522 p = br_port_get_rtnl(dev);
1523 if (!p)
1524 return 0;
1525 br = p->br;
1526 vg = nbp_vlan_group(p);
1527 break;
1528 default:
1529 return 0;
1530 }
1531
1532 if (vg) {
1533 /* we need to count all, even placeholder entries */
1534 list_for_each_entry(v, &vg->vlan_list, vlist)
1535 numvls++;
1536 }
1537
1538 return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
1539 nla_total_size_64bit(sizeof(struct br_mcast_stats)) +
1540 nla_total_size(0);
1541 }
1542
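/* Fill the LINK_XSTATS_TYPE_BRIDGE nest with per-VLAN bridge_vlan_xstats
 * and, with IGMP snooping enabled, the br_mcast_stats blob.  @prividx
 * lets a dump that ran out of room resume from the right entry.
 */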
1543 static int br_fill_linkxstats(struct sk_buff *skb,
1544 const struct net_device *dev,
1545 int *prividx, int attr)
1546 {
1547 struct nlattr *nla __maybe_unused;
1548 struct net_bridge_port *p = NULL;
1549 struct net_bridge_vlan_group *vg;
1550 struct net_bridge_vlan *v;
1551 struct net_bridge *br;
1552 struct nlattr *nest;
1553 int vl_idx = 0;
1554
1555 switch (attr) {
1556 case IFLA_STATS_LINK_XSTATS:
1557 br = netdev_priv(dev);
1558 vg = br_vlan_group(br);
1559 break;
1560 case IFLA_STATS_LINK_XSTATS_SLAVE:
1561 p = br_port_get_rtnl(dev);
1562 if (!p)
1563 return 0;
1564 br = p->br;
1565 vg = nbp_vlan_group(p);
1566 break;
1567 default:
1568 return -EINVAL;
1569 }
1570
1571 nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BRIDGE);
1572 if (!nest)
1573 return -EMSGSIZE;
1574
1575 if (vg) {
1576 u16 pvid;
1577
1578 pvid = br_get_pvid(vg);
1579 list_for_each_entry(v, &vg->vlan_list, vlist) {
1580 struct bridge_vlan_xstats vxi;
1581 struct br_vlan_stats stats;
1582
1583 if (++vl_idx < *prividx)
1584 continue;
1585 memset(&vxi, 0, sizeof(vxi));
1586 vxi.vid = v->vid;
1587 vxi.flags = v->flags;
1588 if (v->vid == pvid)
1589 vxi.flags |= BRIDGE_VLAN_INFO_PVID;
1590 br_vlan_get_stats(v, &stats);
1591 vxi.rx_bytes = stats.rx_bytes;
1592 vxi.rx_packets = stats.rx_packets;
1593 vxi.tx_bytes = stats.tx_bytes;
1594 vxi.tx_packets = stats.tx_packets;
1595
1596 if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
1597 goto nla_put_failure;
1598 }
1599 }
1600
1601 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
1602 if (++vl_idx >= *prividx) {
1603 nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
1604 sizeof(struct br_mcast_stats),
1605 BRIDGE_XSTATS_PAD);
1606 if (!nla)
1607 goto nla_put_failure;
1608 br_multicast_get_stats(br, p, nla_data(nla));
1609 }
1610 #endif
1611 nla_nest_end(skb, nest);
1612 *prividx = 0;
1613
1614 return 0;
1615
1616 nla_put_failure:
1617 nla_nest_end(skb, nest);
1618 *prividx = vl_idx;
1619
1620 return -EMSGSIZE;
1621 }
1622
1623 static struct rtnl_af_ops br_af_ops __read_mostly = {
1624 .family = AF_BRIDGE,
1625 .get_link_af_size = br_get_link_af_size_filtered,
1626 };
1627
1628 struct rtnl_link_ops br_link_ops __read_mostly = {
1629 .kind = "bridge",
1630 .priv_size = sizeof(struct net_bridge),
1631 .setup = br_dev_setup,
1632 .maxtype = IFLA_BR_MAX,
1633 .policy = br_policy,
1634 .validate = br_validate,
1635 .newlink = br_dev_newlink,
1636 .changelink = br_changelink,
1637 .dellink = br_dev_delete,
1638 .get_size = br_get_size,
1639 .fill_info = br_fill_info,
1640 .fill_linkxstats = br_fill_linkxstats,
1641 .get_linkxstats_size = br_get_linkxstats_size,
1642
1643 .slave_maxtype = IFLA_BRPORT_MAX,
1644 .slave_policy = br_port_policy,
1645 .slave_changelink = br_port_slave_changelink,
1646 .get_slave_size = br_port_get_slave_size,
1647 .fill_slave_info = br_port_fill_slave_info,
1648 };
1649
1650 int __init br_netlink_init(void)
1651 {
1652 int err;
1653
1654 br_mdb_init();
1655 rtnl_af_register(&br_af_ops);
1656
1657 err = rtnl_link_register(&br_link_ops);
1658 if (err)
1659 goto out_af;
1660
1661 return 0;
1662
1663 out_af:
1664 rtnl_af_unregister(&br_af_ops);
1665 br_mdb_uninit();
1666 return err;
1667 }
1668
1669 void br_netlink_fini(void)
1670 {
1671 br_mdb_uninit();
1672 rtnl_af_unregister(&br_af_ops);
1673 rtnl_link_unregister(&br_link_ops);
1674 }
1675