/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

#define DSA_MAX_NUM_OFFLOADING_BRIDGES		BITS_PER_LONG

enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_HSR_JOIN,
	DSA_NOTIFIER_HSR_LEAVE,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_MRP_ADD,
	DSA_NOTIFIER_MRP_DEL,
	DSA_NOTIFIER_MRP_ADD_RING_ROLE,
	DSA_NOTIFIER_MRP_DEL_RING_ROLE,
	DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
	DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;
};
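
/* A minimal sketch (an assumption modeled on the callers in port.c, not code
 * from this header) of how one of these cross-chip events is raised: the
 * caller packs the payload into the matching *_info structure and dispatches
 * it with dsa_tree_notify(), declared further below:
 *
 *	struct dsa_notifier_ageing_time_info info = {
 *		.ageing_time = ageing_time,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_AGEING_TIME, &info);
 */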

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	struct net_device *br;
	int tree_index;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	int sw_index;
	int port;
	const unsigned char *addr;
	u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct switchdev_obj_port_mdb *mdb;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	struct net_device *lag;
	int sw_index;
	int port;

	struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct switchdev_obj_port_vlan *vlan;
	int sw_index;
	int port;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	bool targeted_match;
	int sw_index;
	int port;
	int mtu;
};

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
	const struct switchdev_obj_mrp *mrp;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_ring_role_info {
	const struct switchdev_obj_ring_role_mrp *mrp;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
	int tree_index;
	int sw_index;
	int port;
	u16 vid;
};

struct dsa_switchdev_event_work {
	struct dsa_switch *ds;
	int port;
	struct net_device *dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
	struct net_device *hsr;
	int sw_index;
	int port;
};

struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff *	(*xmit)(struct sk_buff *skb,
					struct net_device *dev);

	struct gro_cells	gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port		*dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll		*netpoll;
#endif

	/* TC context */
	struct list_head	mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}
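
/* Usage sketch (an assumption mirroring how master.c sizes the DSA master,
 * not a definition from this header): the tagging overhead is what the master
 * interface must carry on top of the normal Ethernet payload, e.g.
 *
 *	mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
 */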

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);

static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
						       int device, int port)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == device && dp->index == port &&
		    dp->type == DSA_PORT_TYPE_USER)
			return dp->slave;

	return NULL;
}
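
/* Usage sketch (an assumption modeled on a typical tagger's rcv() hook;
 * source_device and source_port are hypothetical locals decoded from the DSA
 * header): the frame is reassigned to the matching user netdevice, or dropped
 * if none exists:
 *
 *	skb->dev = dsa_master_find_slave(dev, source_device, source_port);
 *	if (!skb->dev)
 *		return NULL;
 */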

/* port.c */
bool dsa_port_supports_hwtstamp(struct dsa_port *dp, struct ifreq *ifr);
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack);
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack);
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid);
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;

static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
						 const struct net_device *dev)
{
	return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
					    const struct net_device *bridge_dev)
{
	/* True if the DSA port is connected to this bridge and the event
	 * was emitted for the bridge itself.
	 */
	return dp->bridge_dev == bridge_dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
						 const struct net_device *dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_offloads_bridge_port(dp, dev))
			return true;

	return false;
}

/* Returns true if any port of this tree offloads the given bridge */
static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst,
					    const struct net_device *bridge_dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_offloads_bridge(dp, bridge_dev))
			return true;

	return false;
}

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
int dsa_slave_manage_vlan_filtering(struct net_device *dev,
				    bool vlan_filtering);

static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	return p->dp;
}

static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->cpu_dp->master;
}

/* If under a bridge with vlan_filtering=0, make sure to deliver pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dp->bridge_dev;
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}

/* Allows switches without hardware support for DSA tagging to still support
 * traffic termination through the bridge.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		if (!dp->bridge_dev)
			continue;

		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}

/* If the ingress port offloads the bridge, we mark the frame as autonomously
 * forwarded by hardware, so the software bridge doesn't forward it twice, back
 * to us, because we already did. However, if we're in fallback mode and we do
 * software bridging, we are not offloading it, therefore the dp->bridge_dev
 * pointer is not populated, and flooding needs to be done by software (we are
 * effectively operating in standalone ports mode).
 */
static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);

	skb->offload_fwd_mark = !!(dp->bridge_dev);
}
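
/* Usage sketch (an assumption, not a call site from this header): taggers
 * whose hardware does not report a per-packet forwarding decision call this
 * at the end of their rcv() hook, right before handing the skb to the stack:
 *
 *	dsa_default_offload_fwd_mark(skb);
 */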

/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 *                                                                 skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                                 |               |
 * <----- len ----->                               <----- len ----->
 *                 |
 *       >>>>>>>   v
 *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 *       >>>>>>>   +-----------------------+-----------------------+-------+
 *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *                 +-----------------------+-----------------------+-------+
 *                                                                         ^
 *                                                                         |
 *                                                                 skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
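
/* Usage sketch (an assumption modeled on an EtherType tagger's rcv() path;
 * TAG_LEN stands in for a hypothetical per-tagger header length): the header
 * is parsed at dsa_etype_header_pos_rx() (declared below), pulled, and the
 * MAC addresses are then moved over it:
 *
 *	if (unlikely(!pskb_may_pull(skb, TAG_LEN)))
 *		return NULL;
 *	skb_pull_rcsum(skb, TAG_LEN);
 *	dsa_strip_etype_header(skb, TAG_LEN);
 */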

/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
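
/* Usage sketch (an assumption modeled on an EtherType tagger's xmit() path;
 * TAG_LEN stands in for a hypothetical per-tagger header length): headroom is
 * pushed first, the MAC addresses are moved back to the front, and the tag is
 * then written at dsa_etype_header_pos_tx() (declared below):
 *
 *	u8 *tag;
 *
 *	skb_push(skb, TAG_LEN);
 *	dsa_alloc_etype_header(skb, TAG_LEN);
 *	tag = dsa_etype_header_pos_tx(skb);
 */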

/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA master perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}

/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header).
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	return skb->data + 2 * ETH_ALEN;
}

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops);
int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num);

/* tag_8021q.c */
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
			      struct dsa_notifier_bridge_info *info);
int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
			       struct dsa_notifier_bridge_info *info);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);

extern struct list_head dsa_tree_list;

#endif