1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 
19 #include "spectrum_span.h"
20 #include "spectrum_router.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
28 struct mlxsw_sp_bridge {
29 	struct mlxsw_sp *mlxsw_sp;
30 	struct {
31 		struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 		unsigned int interval; /* ms */
34 	} fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 	u32 ageing_time;
39 	bool vlan_enabled_exists;
40 	struct list_head bridges_list;
41 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 };
45 
46 struct mlxsw_sp_bridge_device {
47 	struct net_device *dev;
48 	struct list_head list;
49 	struct list_head ports_list;
50 	struct list_head mids_list;
51 	u8 vlan_enabled:1,
52 	   multicast_enabled:1,
53 	   mrouter:1;
54 	const struct mlxsw_sp_bridge_ops *ops;
55 };
56 
57 struct mlxsw_sp_bridge_port {
58 	struct net_device *dev;
59 	struct mlxsw_sp_bridge_device *bridge_device;
60 	struct list_head list;
61 	struct list_head vlans_list;
62 	unsigned int ref_count;
63 	u8 stp_state;
64 	unsigned long flags;
65 	bool mrouter;
66 	bool lagged;
67 	union {
68 		u16 lag_id;
69 		u16 system_port;
70 	};
71 };
72 
73 struct mlxsw_sp_bridge_vlan {
74 	struct list_head list;
75 	struct list_head port_vlan_list;
76 	u16 vid;
77 };
78 
79 struct mlxsw_sp_bridge_ops {
80 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 			 struct mlxsw_sp_bridge_port *bridge_port,
82 			 struct mlxsw_sp_port *mlxsw_sp_port,
83 			 struct netlink_ext_ack *extack);
84 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 			   struct mlxsw_sp_bridge_port *bridge_port,
86 			   struct mlxsw_sp_port *mlxsw_sp_port);
87 	struct mlxsw_sp_fid *
88 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
89 			   u16 vid);
90 };
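/* Editorial note: the driver mirrors offloaded bridge objects in a small
 * hierarchy. mlxsw_sp_bridge tracks every offloaded bridge device, each
 * mlxsw_sp_bridge_device tracks its bridge ports, each bridge port tracks
 * the VLANs configured on it, and each mlxsw_sp_bridge_vlan links the
 * per-port VLANs (mlxsw_sp_port_vlan) that are members of it. Lifetimes
 * are managed by the _get()/_put() helpers below: bridge ports are
 * reference counted, while bridge devices and bridge VLANs are destroyed
 * once their member lists become empty.
 */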
91 
92 static int
93 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
94 			       struct mlxsw_sp_bridge_port *bridge_port,
95 			       u16 fid_index);
96 
97 static void
98 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
99 			       struct mlxsw_sp_bridge_port *bridge_port);
100 
101 static void
102 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
103 				   struct mlxsw_sp_bridge_device
104 				   *bridge_device);
105 
106 static void
107 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
108 				 struct mlxsw_sp_bridge_port *bridge_port,
109 				 bool add);
110 
111 static struct mlxsw_sp_bridge_device *
112 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
113 			    const struct net_device *br_dev)
114 {
115 	struct mlxsw_sp_bridge_device *bridge_device;
116 
117 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
118 		if (bridge_device->dev == br_dev)
119 			return bridge_device;
120 
121 	return NULL;
122 }
123 
124 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
125 					 const struct net_device *br_dev)
126 {
127 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
128 }
129 
130 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
131 						    void *data)
132 {
133 	struct mlxsw_sp *mlxsw_sp = data;
134 
135 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
136 	return 0;
137 }
138 
139 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
140 						struct net_device *dev)
141 {
142 	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
143 	netdev_walk_all_upper_dev_rcu(dev,
144 				      mlxsw_sp_bridge_device_upper_rif_destroy,
145 				      mlxsw_sp);
146 }
147 
148 static struct mlxsw_sp_bridge_device *
149 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
150 			      struct net_device *br_dev)
151 {
152 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
153 	struct mlxsw_sp_bridge_device *bridge_device;
154 	bool vlan_enabled = br_vlan_enabled(br_dev);
155 
156 	if (vlan_enabled && bridge->vlan_enabled_exists) {
157 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
158 		return ERR_PTR(-EINVAL);
159 	}
160 
161 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
162 	if (!bridge_device)
163 		return ERR_PTR(-ENOMEM);
164 
165 	bridge_device->dev = br_dev;
166 	bridge_device->vlan_enabled = vlan_enabled;
167 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
168 	bridge_device->mrouter = br_multicast_router(br_dev);
169 	INIT_LIST_HEAD(&bridge_device->ports_list);
170 	if (vlan_enabled) {
171 		bridge->vlan_enabled_exists = true;
172 		bridge_device->ops = bridge->bridge_8021q_ops;
173 	} else {
174 		bridge_device->ops = bridge->bridge_8021d_ops;
175 	}
176 	INIT_LIST_HEAD(&bridge_device->mids_list);
177 	list_add(&bridge_device->list, &bridge->bridges_list);
178 
179 	return bridge_device;
180 }
181 
182 static void
183 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
184 			       struct mlxsw_sp_bridge_device *bridge_device)
185 {
186 	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
187 					    bridge_device->dev);
188 	list_del(&bridge_device->list);
189 	if (bridge_device->vlan_enabled)
190 		bridge->vlan_enabled_exists = false;
191 	WARN_ON(!list_empty(&bridge_device->ports_list));
192 	WARN_ON(!list_empty(&bridge_device->mids_list));
193 	kfree(bridge_device);
194 }
195 
196 static struct mlxsw_sp_bridge_device *
197 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
198 			   struct net_device *br_dev)
199 {
200 	struct mlxsw_sp_bridge_device *bridge_device;
201 
202 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
203 	if (bridge_device)
204 		return bridge_device;
205 
206 	return mlxsw_sp_bridge_device_create(bridge, br_dev);
207 }
208 
209 static void
210 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
211 			   struct mlxsw_sp_bridge_device *bridge_device)
212 {
213 	if (list_empty(&bridge_device->ports_list))
214 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
215 }
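/* Editorial note: a bridge device is created on the first _get() for a
 * bridge netdev and destroyed by _put() once no bridge ports reference it.
 * Only a single VLAN-aware (802.1Q) bridge is supported at a time; any
 * additional bridges must be VLAN-unaware and use the 802.1D ops.
 */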
216 
217 static struct mlxsw_sp_bridge_port *
218 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
219 			    const struct net_device *brport_dev)
220 {
221 	struct mlxsw_sp_bridge_port *bridge_port;
222 
223 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
224 		if (bridge_port->dev == brport_dev)
225 			return bridge_port;
226 	}
227 
228 	return NULL;
229 }
230 
231 struct mlxsw_sp_bridge_port *
232 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
233 			  struct net_device *brport_dev)
234 {
235 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
236 	struct mlxsw_sp_bridge_device *bridge_device;
237 
238 	if (!br_dev)
239 		return NULL;
240 
241 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
242 	if (!bridge_device)
243 		return NULL;
244 
245 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
246 }
247 
248 static struct mlxsw_sp_bridge_port *
249 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
250 			    struct net_device *brport_dev)
251 {
252 	struct mlxsw_sp_bridge_port *bridge_port;
253 	struct mlxsw_sp_port *mlxsw_sp_port;
254 
255 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
256 	if (!bridge_port)
257 		return NULL;
258 
259 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
260 	bridge_port->lagged = mlxsw_sp_port->lagged;
261 	if (bridge_port->lagged)
262 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
263 	else
264 		bridge_port->system_port = mlxsw_sp_port->local_port;
265 	bridge_port->dev = brport_dev;
266 	bridge_port->bridge_device = bridge_device;
267 	bridge_port->stp_state = BR_STATE_DISABLED;
268 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
269 			     BR_MCAST_FLOOD;
270 	INIT_LIST_HEAD(&bridge_port->vlans_list);
271 	list_add(&bridge_port->list, &bridge_device->ports_list);
272 	bridge_port->ref_count = 1;
273 
274 	return bridge_port;
275 }
276 
277 static void
278 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
279 {
280 	list_del(&bridge_port->list);
281 	WARN_ON(!list_empty(&bridge_port->vlans_list));
282 	kfree(bridge_port);
283 }
284 
285 static struct mlxsw_sp_bridge_port *
286 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
287 			 struct net_device *brport_dev)
288 {
289 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
290 	struct mlxsw_sp_bridge_device *bridge_device;
291 	struct mlxsw_sp_bridge_port *bridge_port;
292 	int err;
293 
294 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
295 	if (bridge_port) {
296 		bridge_port->ref_count++;
297 		return bridge_port;
298 	}
299 
300 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
301 	if (IS_ERR(bridge_device))
302 		return ERR_CAST(bridge_device);
303 
304 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
305 	if (!bridge_port) {
306 		err = -ENOMEM;
307 		goto err_bridge_port_create;
308 	}
309 
310 	return bridge_port;
311 
312 err_bridge_port_create:
313 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
314 	return ERR_PTR(err);
315 }
316 
317 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
318 				     struct mlxsw_sp_bridge_port *bridge_port)
319 {
320 	struct mlxsw_sp_bridge_device *bridge_device;
321 
322 	if (--bridge_port->ref_count != 0)
323 		return;
324 	bridge_device = bridge_port->bridge_device;
325 	mlxsw_sp_bridge_port_destroy(bridge_port);
326 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
327 }
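/* Editorial note: illustrative usage sketch of the bridge port get/put
 * pair; this is not part of the original source:
 *
 *	bridge_port = mlxsw_sp_bridge_port_get(bridge, brport_dev);
 *	if (IS_ERR(bridge_port))
 *		return PTR_ERR(bridge_port);
 *	...
 *	mlxsw_sp_bridge_port_put(bridge, bridge_port);
 *
 * _get() takes a reference on an existing port or creates a new one (also
 * taking a reference on the underlying bridge device), and _put() releases
 * both once the last reference is dropped.
 */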
328 
329 static struct mlxsw_sp_port_vlan *
330 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
331 				  const struct mlxsw_sp_bridge_device *
332 				  bridge_device,
333 				  u16 vid)
334 {
335 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
336 
337 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
338 			    list) {
339 		if (!mlxsw_sp_port_vlan->bridge_port)
340 			continue;
341 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
342 		    bridge_device)
343 			continue;
344 		if (bridge_device->vlan_enabled &&
345 		    mlxsw_sp_port_vlan->vid != vid)
346 			continue;
347 		return mlxsw_sp_port_vlan;
348 	}
349 
350 	return NULL;
351 }
352 
353 static struct mlxsw_sp_port_vlan *
354 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
355 			       u16 fid_index)
356 {
357 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
358 
359 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
360 			    list) {
361 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
362 
363 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
364 			return mlxsw_sp_port_vlan;
365 	}
366 
367 	return NULL;
368 }
369 
370 static struct mlxsw_sp_bridge_vlan *
371 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
372 			  u16 vid)
373 {
374 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
375 
376 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
377 		if (bridge_vlan->vid == vid)
378 			return bridge_vlan;
379 	}
380 
381 	return NULL;
382 }
383 
384 static struct mlxsw_sp_bridge_vlan *
385 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
386 {
387 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
388 
389 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
390 	if (!bridge_vlan)
391 		return NULL;
392 
393 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
394 	bridge_vlan->vid = vid;
395 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
396 
397 	return bridge_vlan;
398 }
399 
400 static void
401 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
402 {
403 	list_del(&bridge_vlan->list);
404 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
405 	kfree(bridge_vlan);
406 }
407 
408 static struct mlxsw_sp_bridge_vlan *
409 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
410 {
411 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
412 
413 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
414 	if (bridge_vlan)
415 		return bridge_vlan;
416 
417 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
418 }
419 
420 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
421 {
422 	if (list_empty(&bridge_vlan->port_vlan_list))
423 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
424 }
425 
426 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
427 					   struct net_device *dev,
428 					   unsigned long *brport_flags)
429 {
430 	struct mlxsw_sp_bridge_port *bridge_port;
431 
432 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
433 	if (WARN_ON(!bridge_port))
434 		return;
435 
436 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
437 }
438 
439 static int mlxsw_sp_port_attr_get(struct net_device *dev,
440 				  struct switchdev_attr *attr)
441 {
442 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
443 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
444 
445 	switch (attr->id) {
446 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
447 		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
448 		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
449 		       attr->u.ppid.id_len);
450 		break;
451 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
452 		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
453 					       &attr->u.brport_flags);
454 		break;
455 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
456 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
457 					       BR_MCAST_FLOOD;
458 		break;
459 	default:
460 		return -EOPNOTSUPP;
461 	}
462 
463 	return 0;
464 }
465 
466 static int
467 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
468 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
469 				  u8 state)
470 {
471 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
472 
473 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
474 			    bridge_vlan_node) {
475 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
476 			continue;
477 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
478 						 bridge_vlan->vid, state);
479 	}
480 
481 	return 0;
482 }
483 
484 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
485 					    struct switchdev_trans *trans,
486 					    struct net_device *orig_dev,
487 					    u8 state)
488 {
489 	struct mlxsw_sp_bridge_port *bridge_port;
490 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
491 	int err;
492 
493 	if (switchdev_trans_ph_prepare(trans))
494 		return 0;
495 
496 	/* It's possible we failed to enslave the port, yet this
497 	 * operation is executed due to it being deferred.
498 	 */
499 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
500 						orig_dev);
501 	if (!bridge_port)
502 		return 0;
503 
504 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
505 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
506 							bridge_vlan, state);
507 		if (err)
508 			goto err_port_bridge_vlan_stp_set;
509 	}
510 
511 	bridge_port->stp_state = state;
512 
513 	return 0;
514 
515 err_port_bridge_vlan_stp_set:
516 	list_for_each_entry_continue_reverse(bridge_vlan,
517 					     &bridge_port->vlans_list, list)
518 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
519 						  bridge_port->stp_state);
520 	return err;
521 }
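/* Editorial note: this is the rollback pattern used throughout the file.
 * If programming one bridge VLAN fails,
 * list_for_each_entry_continue_reverse() walks back over the VLANs that
 * were already updated and restores their previous state before the error
 * is propagated.
 */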
522 
523 static int
524 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
525 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
526 				    enum mlxsw_sp_flood_type packet_type,
527 				    bool member)
528 {
529 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
530 
531 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
532 			    bridge_vlan_node) {
533 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
534 			continue;
535 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
536 					      packet_type,
537 					      mlxsw_sp_port->local_port,
538 					      member);
539 	}
540 
541 	return 0;
542 }
543 
544 static int
545 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
546 				     struct mlxsw_sp_bridge_port *bridge_port,
547 				     enum mlxsw_sp_flood_type packet_type,
548 				     bool member)
549 {
550 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
551 	int err;
552 
553 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
554 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
555 							  bridge_vlan,
556 							  packet_type,
557 							  member);
558 		if (err)
559 			goto err_port_bridge_vlan_flood_set;
560 	}
561 
562 	return 0;
563 
564 err_port_bridge_vlan_flood_set:
565 	list_for_each_entry_continue_reverse(bridge_vlan,
566 					     &bridge_port->vlans_list, list)
567 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
568 						    packet_type, !member);
569 	return err;
570 }
571 
572 static int
573 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
574 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
575 				       bool set)
576 {
577 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
578 	u16 vid = bridge_vlan->vid;
579 
580 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
581 			    bridge_vlan_node) {
582 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
583 			continue;
584 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
585 	}
586 
587 	return 0;
588 }
589 
590 static int
591 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
592 				  struct mlxsw_sp_bridge_port *bridge_port,
593 				  bool set)
594 {
595 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
596 	int err;
597 
598 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
599 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
600 							     bridge_vlan, set);
601 		if (err)
602 			goto err_port_bridge_vlan_learning_set;
603 	}
604 
605 	return 0;
606 
607 err_port_bridge_vlan_learning_set:
608 	list_for_each_entry_continue_reverse(bridge_vlan,
609 					     &bridge_port->vlans_list, list)
610 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
611 						       bridge_vlan, !set);
612 	return err;
613 }
614 
615 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
616 					   struct switchdev_trans *trans,
617 					   struct net_device *orig_dev,
618 					   unsigned long brport_flags)
619 {
620 	struct mlxsw_sp_bridge_port *bridge_port;
621 	int err;
622 
623 	if (switchdev_trans_ph_prepare(trans))
624 		return 0;
625 
626 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
627 						orig_dev);
628 	if (!bridge_port)
629 		return 0;
630 
631 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
632 						   MLXSW_SP_FLOOD_TYPE_UC,
633 						   brport_flags & BR_FLOOD);
634 	if (err)
635 		return err;
636 
637 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
638 						brport_flags & BR_LEARNING);
639 	if (err)
640 		return err;
641 
642 	if (bridge_port->bridge_device->multicast_enabled)
643 		goto out;
644 
645 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
646 						   MLXSW_SP_FLOOD_TYPE_MC,
647 						   brport_flags &
648 						   BR_MCAST_FLOOD);
649 	if (err)
650 		return err;
651 
652 out:
653 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
654 	return 0;
655 }
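/* Editorial note: BR_FLOOD and BR_LEARNING are always programmed above.
 * BR_MCAST_FLOOD is only written to the multicast flood table while
 * multicast snooping is disabled on the bridge; with snooping enabled,
 * unregistered multicast flooding is instead governed by the port's
 * mrouter state (see mlxsw_sp_mc_flood()).
 */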
656 
657 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
658 {
659 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
660 	int err;
661 
662 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
663 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
664 	if (err)
665 		return err;
666 	mlxsw_sp->bridge->ageing_time = ageing_time;
667 	return 0;
668 }
669 
670 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
671 					    struct switchdev_trans *trans,
672 					    unsigned long ageing_clock_t)
673 {
674 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
675 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
676 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
677 
678 	if (switchdev_trans_ph_prepare(trans)) {
679 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
680 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
681 			return -ERANGE;
682 		else
683 			return 0;
684 	}
685 
686 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
687 }
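/* Editorial note: the ageing time arrives from switchdev in clock_t units
 * and is converted to seconds. It is range-checked against
 * MLXSW_SP_MIN/MAX_AGEING_TIME in the prepare phase and written to the
 * SFDAT register (and cached in bridge->ageing_time) in the commit phase.
 */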
688 
689 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
690 					  struct switchdev_trans *trans,
691 					  struct net_device *orig_dev,
692 					  bool vlan_enabled)
693 {
694 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
695 	struct mlxsw_sp_bridge_device *bridge_device;
696 
697 	if (!switchdev_trans_ph_prepare(trans))
698 		return 0;
699 
700 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
701 	if (WARN_ON(!bridge_device))
702 		return -EINVAL;
703 
704 	if (bridge_device->vlan_enabled == vlan_enabled)
705 		return 0;
706 
707 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
708 	return -EINVAL;
709 }
710 
711 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
712 					  struct switchdev_trans *trans,
713 					  struct net_device *orig_dev,
714 					  bool is_port_mrouter)
715 {
716 	struct mlxsw_sp_bridge_port *bridge_port;
717 	int err;
718 
719 	if (switchdev_trans_ph_prepare(trans))
720 		return 0;
721 
722 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
723 						orig_dev);
724 	if (!bridge_port)
725 		return 0;
726 
727 	if (!bridge_port->bridge_device->multicast_enabled)
728 		goto out;
729 
730 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
731 						   MLXSW_SP_FLOOD_TYPE_MC,
732 						   is_port_mrouter);
733 	if (err)
734 		return err;
735 
736 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
737 					 is_port_mrouter);
738 out:
739 	bridge_port->mrouter = is_port_mrouter;
740 	return 0;
741 }
742 
743 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
744 {
745 	const struct mlxsw_sp_bridge_device *bridge_device;
746 
747 	bridge_device = bridge_port->bridge_device;
748 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
749 					bridge_port->flags & BR_MCAST_FLOOD;
750 }
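/* Editorial note: with multicast snooping enabled, only mrouter ports
 * should receive unregistered multicast traffic; otherwise flooding
 * follows the port's BR_MCAST_FLOOD flag.
 */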
751 
752 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
753 					 struct switchdev_trans *trans,
754 					 struct net_device *orig_dev,
755 					 bool mc_disabled)
756 {
757 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
758 	struct mlxsw_sp_bridge_device *bridge_device;
759 	struct mlxsw_sp_bridge_port *bridge_port;
760 	int err;
761 
762 	if (switchdev_trans_ph_prepare(trans))
763 		return 0;
764 
765 	/* It's possible we failed to enslave the port, yet this
766 	 * operation is executed due to it being deferred.
767 	 */
768 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
769 	if (!bridge_device)
770 		return 0;
771 
772 	if (bridge_device->multicast_enabled != !mc_disabled) {
773 		bridge_device->multicast_enabled = !mc_disabled;
774 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
775 						   bridge_device);
776 	}
777 
778 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
779 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
780 		bool member = mlxsw_sp_mc_flood(bridge_port);
781 
782 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
783 							   bridge_port,
784 							   packet_type, member);
785 		if (err)
786 			return err;
787 	}
788 
789 	bridge_device->multicast_enabled = !mc_disabled;
790 
791 	return 0;
792 }
793 
794 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
795 					 u16 mid_idx, bool add)
796 {
797 	char *smid_pl;
798 	int err;
799 
800 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
801 	if (!smid_pl)
802 		return -ENOMEM;
803 
804 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
805 			    mlxsw_sp_router_port(mlxsw_sp), add);
806 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
807 	kfree(smid_pl);
808 	return err;
809 }
810 
811 static void
812 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
813 				   struct mlxsw_sp_bridge_device *bridge_device,
814 				   bool add)
815 {
816 	struct mlxsw_sp_mid *mid;
817 
818 	list_for_each_entry(mid, &bridge_device->mids_list, list)
819 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
820 }
821 
822 static int
823 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
824 				  struct switchdev_trans *trans,
825 				  struct net_device *orig_dev,
826 				  bool is_mrouter)
827 {
828 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
829 	struct mlxsw_sp_bridge_device *bridge_device;
830 
831 	if (switchdev_trans_ph_prepare(trans))
832 		return 0;
833 
834 	/* It's possible we failed to enslave the port, yet this
835 	 * operation is executed due to it being deferred.
836 	 */
837 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
838 	if (!bridge_device)
839 		return 0;
840 
841 	if (bridge_device->mrouter != is_mrouter)
842 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
843 						   is_mrouter);
844 	bridge_device->mrouter = is_mrouter;
845 	return 0;
846 }
847 
848 static int mlxsw_sp_port_attr_set(struct net_device *dev,
849 				  const struct switchdev_attr *attr,
850 				  struct switchdev_trans *trans)
851 {
852 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
853 	int err;
854 
855 	switch (attr->id) {
856 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
857 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
858 						       attr->orig_dev,
859 						       attr->u.stp_state);
860 		break;
861 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
862 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
863 						      attr->orig_dev,
864 						      attr->u.brport_flags);
865 		break;
866 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
867 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
868 						       attr->u.ageing_time);
869 		break;
870 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
871 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
872 						     attr->orig_dev,
873 						     attr->u.vlan_filtering);
874 		break;
875 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
876 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
877 						     attr->orig_dev,
878 						     attr->u.mrouter);
879 		break;
880 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
881 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
882 						    attr->orig_dev,
883 						    attr->u.mc_disabled);
884 		break;
885 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
886 		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
887 							attr->orig_dev,
888 							attr->u.mrouter);
889 		break;
890 	default:
891 		err = -EOPNOTSUPP;
892 		break;
893 	}
894 
895 	if (switchdev_trans_ph_commit(trans))
896 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
897 
898 	return err;
899 }
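/* Editorial note: switchdev attributes are applied as two-phase
 * (prepare/commit) transactions. Most handlers above only act in the
 * commit phase; VLAN filtering and ageing time are validated in the
 * prepare phase so unsupported values can be rejected before commit.
 * SPAN entries are re-resolved after every committed transaction,
 * presumably because bridge configuration changes can affect the
 * resolved mirroring path.
 */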
900 
901 static int
902 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
903 			    struct mlxsw_sp_bridge_port *bridge_port)
904 {
905 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
906 	struct mlxsw_sp_bridge_device *bridge_device;
907 	u8 local_port = mlxsw_sp_port->local_port;
908 	u16 vid = mlxsw_sp_port_vlan->vid;
909 	struct mlxsw_sp_fid *fid;
910 	int err;
911 
912 	bridge_device = bridge_port->bridge_device;
913 	fid = bridge_device->ops->fid_get(bridge_device, vid);
914 	if (IS_ERR(fid))
915 		return PTR_ERR(fid);
916 
917 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
918 				     bridge_port->flags & BR_FLOOD);
919 	if (err)
920 		goto err_fid_uc_flood_set;
921 
922 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
923 				     mlxsw_sp_mc_flood(bridge_port));
924 	if (err)
925 		goto err_fid_mc_flood_set;
926 
927 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
928 				     true);
929 	if (err)
930 		goto err_fid_bc_flood_set;
931 
932 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
933 	if (err)
934 		goto err_fid_port_vid_map;
935 
936 	mlxsw_sp_port_vlan->fid = fid;
937 
938 	return 0;
939 
940 err_fid_port_vid_map:
941 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
942 err_fid_bc_flood_set:
943 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
944 err_fid_mc_flood_set:
945 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
946 err_fid_uc_flood_set:
947 	mlxsw_sp_fid_put(fid);
948 	return err;
949 }
950 
951 static void
952 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
953 {
954 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
955 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
956 	u8 local_port = mlxsw_sp_port->local_port;
957 	u16 vid = mlxsw_sp_port_vlan->vid;
958 
959 	mlxsw_sp_port_vlan->fid = NULL;
960 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
961 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
962 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
963 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
964 	mlxsw_sp_fid_put(fid);
965 }
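/* Editorial note: joining a FID enables unicast, multicast and broadcast
 * flooding for the local port as dictated by the bridge port flags and
 * maps the {port, VID} pair to the FID; leaving undoes these steps in
 * reverse order and drops the FID reference.
 */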
966 
967 static u16
968 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
969 			     u16 vid, bool is_pvid)
970 {
971 	if (is_pvid)
972 		return vid;
973 	else if (mlxsw_sp_port->pvid == vid)
974 		return 0;	/* Dis-allow untagged packets */
975 	else
976 		return mlxsw_sp_port->pvid;
977 }
978 
979 static int
980 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
981 			       struct mlxsw_sp_bridge_port *bridge_port)
982 {
983 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
984 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
985 	u16 vid = mlxsw_sp_port_vlan->vid;
986 	int err;
987 
988 	/* No need to continue if only VLAN flags were changed */
989 	if (mlxsw_sp_port_vlan->bridge_port) {
990 		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
991 		return 0;
992 	}
993 
994 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
995 	if (err)
996 		return err;
997 
998 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
999 					     bridge_port->flags & BR_LEARNING);
1000 	if (err)
1001 		goto err_port_vid_learning_set;
1002 
1003 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1004 					bridge_port->stp_state);
1005 	if (err)
1006 		goto err_port_vid_stp_set;
1007 
1008 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1009 	if (!bridge_vlan) {
1010 		err = -ENOMEM;
1011 		goto err_bridge_vlan_get;
1012 	}
1013 
1014 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1015 		 &bridge_vlan->port_vlan_list);
1016 
1017 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1018 				 bridge_port->dev);
1019 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
1020 
1021 	return 0;
1022 
1023 err_bridge_vlan_get:
1024 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1025 err_port_vid_stp_set:
1026 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1027 err_port_vid_learning_set:
1028 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1029 	return err;
1030 }
1031 
1032 void
1033 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1034 {
1035 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1036 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1037 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1038 	struct mlxsw_sp_bridge_port *bridge_port;
1039 	u16 vid = mlxsw_sp_port_vlan->vid;
1040 	bool last_port, last_vlan;
1041 
1042 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1043 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1044 		return;
1045 
1046 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1047 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1048 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1049 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1050 
1051 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1052 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1053 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1054 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1055 	if (last_port)
1056 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1057 					       bridge_port,
1058 					       mlxsw_sp_fid_index(fid));
1059 	if (last_vlan)
1060 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1061 
1062 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1063 
1064 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1065 	mlxsw_sp_port_vlan->bridge_port = NULL;
1066 }
1067 
1068 static int
1069 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1070 			      struct mlxsw_sp_bridge_port *bridge_port,
1071 			      u16 vid, bool is_untagged, bool is_pvid)
1072 {
1073 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1074 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1075 	u16 old_pvid = mlxsw_sp_port->pvid;
1076 	int err;
1077 
1078 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
1079 	if (IS_ERR(mlxsw_sp_port_vlan))
1080 		return PTR_ERR(mlxsw_sp_port_vlan);
1081 
1082 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1083 				     is_untagged);
1084 	if (err)
1085 		goto err_port_vlan_set;
1086 
1087 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1088 	if (err)
1089 		goto err_port_pvid_set;
1090 
1091 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1092 	if (err)
1093 		goto err_port_vlan_bridge_join;
1094 
1095 	return 0;
1096 
1097 err_port_vlan_bridge_join:
1098 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1099 err_port_pvid_set:
1100 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1101 err_port_vlan_set:
1102 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1103 	return err;
1104 }
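/* Editorial note: adding a VLAN to a bridge port is a four step sequence:
 * get (or create) the port VLAN, add the VID to the port's VLAN membership
 * (tagged or untagged), update the PVID and finally join the bridge, with
 * each step rolled back in reverse order on failure.
 */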
1105 
1106 static int
1107 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1108 				const struct net_device *br_dev,
1109 				const struct switchdev_obj_port_vlan *vlan)
1110 {
1111 	struct mlxsw_sp_rif *rif;
1112 	struct mlxsw_sp_fid *fid;
1113 	u16 pvid;
1114 	u16 vid;
1115 
1116 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1117 	if (!rif)
1118 		return 0;
1119 	fid = mlxsw_sp_rif_fid(rif);
1120 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1121 
1122 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1123 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1124 			if (vid != pvid) {
1125 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1126 				return -EBUSY;
1127 			}
1128 		} else {
1129 			if (vid == pvid) {
1130 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1131 				return -EBUSY;
1132 			}
1133 		}
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1140 				   const struct switchdev_obj_port_vlan *vlan,
1141 				   struct switchdev_trans *trans)
1142 {
1143 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1144 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1145 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1146 	struct net_device *orig_dev = vlan->obj.orig_dev;
1147 	struct mlxsw_sp_bridge_port *bridge_port;
1148 	u16 vid;
1149 
1150 	if (netif_is_bridge_master(orig_dev)) {
1151 		int err = 0;
1152 
1153 		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1154 		    br_vlan_enabled(orig_dev) &&
1155 		    switchdev_trans_ph_prepare(trans))
1156 			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1157 							      orig_dev, vlan);
1158 		if (!err)
1159 			err = -EOPNOTSUPP;
1160 		return err;
1161 	}
1162 
1163 	if (switchdev_trans_ph_prepare(trans))
1164 		return 0;
1165 
1166 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1167 	if (WARN_ON(!bridge_port))
1168 		return -EINVAL;
1169 
1170 	if (!bridge_port->bridge_device->vlan_enabled)
1171 		return 0;
1172 
1173 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1174 		int err;
1175 
1176 		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1177 						    vid, flag_untagged,
1178 						    flag_pvid);
1179 		if (err)
1180 			return err;
1181 	}
1182 
1183 	return 0;
1184 }
1185 
1186 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1187 {
1188 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1189 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1190 }
1191 
1192 static int
1193 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1194 			       struct mlxsw_sp_bridge_port *bridge_port,
1195 			       u16 fid_index)
1196 {
1197 	bool lagged = bridge_port->lagged;
1198 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1199 	u16 system_port;
1200 
1201 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1202 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1203 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1204 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1205 
1206 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1207 }
1208 
1209 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1210 {
1211 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1212 			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1213 }
1214 
1215 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1216 {
1217 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1218 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1219 }
1220 
1221 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1222 				     const char *mac, u16 fid, bool adding,
1223 				     enum mlxsw_reg_sfd_rec_action action,
1224 				     enum mlxsw_reg_sfd_rec_policy policy)
1225 {
1226 	char *sfd_pl;
1227 	u8 num_rec;
1228 	int err;
1229 
1230 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1231 	if (!sfd_pl)
1232 		return -ENOMEM;
1233 
1234 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1235 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1236 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1237 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1238 	if (err)
1239 		goto out;
1240 
1241 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1242 		err = -EBUSY;
1243 
1244 out:
1245 	kfree(sfd_pl);
1246 	return err;
1247 }
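/* Editorial note: a single unicast SFD record is packed and written to the
 * device. If the device reports that it consumed a different number of
 * records than were packed, the entry was not installed and the operation
 * is treated as -EBUSY.
 */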
1248 
1249 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1250 				   const char *mac, u16 fid, bool adding,
1251 				   bool dynamic)
1252 {
1253 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1254 					 MLXSW_REG_SFD_REC_ACTION_NOP,
1255 					 mlxsw_sp_sfd_rec_policy(dynamic));
1256 }
1257 
1258 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1259 			bool adding)
1260 {
1261 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1262 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1263 					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1264 }
1265 
1266 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1267 				       const char *mac, u16 fid, u16 lag_vid,
1268 				       bool adding, bool dynamic)
1269 {
1270 	char *sfd_pl;
1271 	u8 num_rec;
1272 	int err;
1273 
1274 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1275 	if (!sfd_pl)
1276 		return -ENOMEM;
1277 
1278 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1279 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1280 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1281 				  lag_vid, lag_id);
1282 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1283 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1284 	if (err)
1285 		goto out;
1286 
1287 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1288 		err = -EBUSY;
1289 
1290 out:
1291 	kfree(sfd_pl);
1292 	return err;
1293 }
1294 
1295 static int
1296 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1297 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1298 {
1299 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1300 	struct net_device *orig_dev = fdb_info->info.dev;
1301 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1302 	struct mlxsw_sp_bridge_device *bridge_device;
1303 	struct mlxsw_sp_bridge_port *bridge_port;
1304 	u16 fid_index, vid;
1305 
1306 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1307 	if (!bridge_port)
1308 		return -EINVAL;
1309 
1310 	bridge_device = bridge_port->bridge_device;
1311 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1312 							       bridge_device,
1313 							       fdb_info->vid);
1314 	if (!mlxsw_sp_port_vlan)
1315 		return 0;
1316 
1317 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1318 	vid = mlxsw_sp_port_vlan->vid;
1319 
1320 	if (!bridge_port->lagged)
1321 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1322 					       bridge_port->system_port,
1323 					       fdb_info->addr, fid_index,
1324 					       adding, false);
1325 	else
1326 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1327 						   bridge_port->lag_id,
1328 						   fdb_info->addr, fid_index,
1329 						   vid, adding, false);
1330 }
1331 
1332 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1333 				u16 fid, u16 mid_idx, bool adding)
1334 {
1335 	char *sfd_pl;
1336 	u8 num_rec;
1337 	int err;
1338 
1339 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1340 	if (!sfd_pl)
1341 		return -ENOMEM;
1342 
1343 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1344 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1345 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1346 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1347 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1348 	if (err)
1349 		goto out;
1350 
1351 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1352 		err = -EBUSY;
1353 
1354 out:
1355 	kfree(sfd_pl);
1356 	return err;
1357 }
1358 
1359 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1360 					 long *ports_bitmap,
1361 					 bool set_router_port)
1362 {
1363 	char *smid_pl;
1364 	int err, i;
1365 
1366 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1367 	if (!smid_pl)
1368 		return -ENOMEM;
1369 
1370 	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1371 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1372 		if (mlxsw_sp->ports[i])
1373 			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1374 	}
1375 
1376 	mlxsw_reg_smid_port_mask_set(smid_pl,
1377 				     mlxsw_sp_router_port(mlxsw_sp), 1);
1378 
1379 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1380 		mlxsw_reg_smid_port_set(smid_pl, i, 1);
1381 
1382 	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1383 				set_router_port);
1384 
1385 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1386 	kfree(smid_pl);
1387 	return err;
1388 }
1389 
1390 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1391 				  u16 mid_idx, bool add)
1392 {
1393 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1394 	char *smid_pl;
1395 	int err;
1396 
1397 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1398 	if (!smid_pl)
1399 		return -ENOMEM;
1400 
1401 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1402 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1403 	kfree(smid_pl);
1404 	return err;
1405 }
1406 
1407 static struct
1408 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1409 				const unsigned char *addr,
1410 				u16 fid)
1411 {
1412 	struct mlxsw_sp_mid *mid;
1413 
1414 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1415 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1416 			return mid;
1417 	}
1418 	return NULL;
1419 }
1420 
1421 static void
1422 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1423 				      struct mlxsw_sp_bridge_port *bridge_port,
1424 				      unsigned long *ports_bitmap)
1425 {
1426 	struct mlxsw_sp_port *mlxsw_sp_port;
1427 	u64 max_lag_members, i;
1428 	int lag_id;
1429 
1430 	if (!bridge_port->lagged) {
1431 		set_bit(bridge_port->system_port, ports_bitmap);
1432 	} else {
1433 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1434 						     MAX_LAG_MEMBERS);
1435 		lag_id = bridge_port->lag_id;
1436 		for (i = 0; i < max_lag_members; i++) {
1437 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1438 								 lag_id, i);
1439 			if (mlxsw_sp_port)
1440 				set_bit(mlxsw_sp_port->local_port,
1441 					ports_bitmap);
1442 		}
1443 	}
1444 }
1445 
1446 static void
1447 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1448 				struct mlxsw_sp_bridge_device *bridge_device,
1449 				struct mlxsw_sp *mlxsw_sp)
1450 {
1451 	struct mlxsw_sp_bridge_port *bridge_port;
1452 
1453 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1454 		if (bridge_port->mrouter) {
1455 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1456 							      bridge_port,
1457 							      flood_bitmap);
1458 		}
1459 	}
1460 }
1461 
1462 static bool
1463 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1464 			    struct mlxsw_sp_mid *mid,
1465 			    struct mlxsw_sp_bridge_device *bridge_device)
1466 {
1467 	long *flood_bitmap;
1468 	int num_of_ports;
1469 	int alloc_size;
1470 	u16 mid_idx;
1471 	int err;
1472 
1473 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1474 				      MLXSW_SP_MID_MAX);
1475 	if (mid_idx == MLXSW_SP_MID_MAX)
1476 		return false;
1477 
1478 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1479 	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1480 	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1481 	if (!flood_bitmap)
1482 		return false;
1483 
1484 	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
1485 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1486 
1487 	mid->mid = mid_idx;
1488 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1489 					    bridge_device->mrouter);
1490 	kfree(flood_bitmap);
1491 	if (err)
1492 		return false;
1493 
1494 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1495 				   true);
1496 	if (err)
1497 		return false;
1498 
1499 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1500 	mid->in_hw = true;
1501 	return true;
1502 }
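/* Editorial note: writing an MDB entry allocates a free MID index, builds
 * a flood bitmap from the MID's member ports plus all mrouter ports in the
 * bridge, programs the SMID register (optionally including the router
 * port) and then adds a multicast SFD record pointing at the MID. On
 * success the index is reserved in mids_bitmap and the entry is marked as
 * present in hardware.
 */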
1503 
1504 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1505 					struct mlxsw_sp_mid *mid)
1506 {
1507 	if (!mid->in_hw)
1508 		return 0;
1509 
1510 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1511 	mid->in_hw = false;
1512 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1513 				    false);
1514 }
1515 
1516 static struct
1517 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1518 				  struct mlxsw_sp_bridge_device *bridge_device,
1519 				  const unsigned char *addr,
1520 				  u16 fid)
1521 {
1522 	struct mlxsw_sp_mid *mid;
1523 	size_t alloc_size;
1524 
1525 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1526 	if (!mid)
1527 		return NULL;
1528 
1529 	alloc_size = sizeof(unsigned long) *
1530 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1531 
1532 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1533 	if (!mid->ports_in_mid)
1534 		goto err_ports_in_mid_alloc;
1535 
1536 	ether_addr_copy(mid->addr, addr);
1537 	mid->fid = fid;
1538 	mid->in_hw = false;
1539 
1540 	if (!bridge_device->multicast_enabled)
1541 		goto out;
1542 
1543 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1544 		goto err_write_mdb_entry;
1545 
1546 out:
1547 	list_add_tail(&mid->list, &bridge_device->mids_list);
1548 	return mid;
1549 
1550 err_write_mdb_entry:
1551 	kfree(mid->ports_in_mid);
1552 err_ports_in_mid_alloc:
1553 	kfree(mid);
1554 	return NULL;
1555 }
1556 
1557 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1558 					 struct mlxsw_sp_mid *mid)
1559 {
1560 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1561 	int err = 0;
1562 
1563 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1564 	if (bitmap_empty(mid->ports_in_mid,
1565 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1566 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1567 		list_del(&mid->list);
1568 		kfree(mid->ports_in_mid);
1569 		kfree(mid);
1570 	}
1571 	return err;
1572 }
1573 
1574 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1575 				 const struct switchdev_obj_port_mdb *mdb,
1576 				 struct switchdev_trans *trans)
1577 {
1578 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1579 	struct net_device *orig_dev = mdb->obj.orig_dev;
1580 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1581 	struct net_device *dev = mlxsw_sp_port->dev;
1582 	struct mlxsw_sp_bridge_device *bridge_device;
1583 	struct mlxsw_sp_bridge_port *bridge_port;
1584 	struct mlxsw_sp_mid *mid;
1585 	u16 fid_index;
1586 	int err = 0;
1587 
1588 	if (switchdev_trans_ph_commit(trans))
1589 		return 0;
1590 
1591 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1592 	if (!bridge_port)
1593 		return 0;
1594 
1595 	bridge_device = bridge_port->bridge_device;
1596 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1597 							       bridge_device,
1598 							       mdb->vid);
1599 	if (!mlxsw_sp_port_vlan)
1600 		return 0;
1601 
1602 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1603 
1604 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1605 	if (!mid) {
1606 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1607 					  fid_index);
1608 		if (!mid) {
1609 			netdev_err(dev, "Unable to allocate MC group\n");
1610 			return -ENOMEM;
1611 		}
1612 	}
1613 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1614 
1615 	if (!bridge_device->multicast_enabled)
1616 		return 0;
1617 
1618 	if (bridge_port->mrouter)
1619 		return 0;
1620 
1621 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1622 	if (err) {
1623 		netdev_err(dev, "Unable to set SMID\n");
1624 		goto err_out;
1625 	}
1626 
1627 	return 0;
1628 
1629 err_out:
1630 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1631 	return err;
1632 }
1633 
1634 static void
1635 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1636 				   struct mlxsw_sp_bridge_device
1637 				   *bridge_device)
1638 {
1639 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1640 	struct mlxsw_sp_mid *mid;
1641 	bool mc_enabled;
1642 
1643 	mc_enabled = bridge_device->multicast_enabled;
1644 
1645 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1646 		if (mc_enabled)
1647 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1648 						    bridge_device);
1649 		else
1650 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1651 	}
1652 }
1653 
1654 static void
1655 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1656 				 struct mlxsw_sp_bridge_port *bridge_port,
1657 				 bool add)
1658 {
1659 	struct mlxsw_sp_bridge_device *bridge_device;
1660 	struct mlxsw_sp_mid *mid;
1661 
1662 	bridge_device = bridge_port->bridge_device;
1663 
1664 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1665 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1666 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1667 	}
1668 }
1669 
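/* SPAN (mirroring) bindings can depend on bridge state, so they are
 * re-resolved ("respun") whenever that state may have changed. The respin is
 * deferred to the driver workqueue and runs under RTNL, since the request can
 * originate from atomic context (note the GFP_ATOMIC allocation below) and,
 * for switchdev object addition, from before the bridge has actually been
 * updated.
 */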
1670 struct mlxsw_sp_span_respin_work {
1671 	struct work_struct work;
1672 	struct mlxsw_sp *mlxsw_sp;
1673 };
1674 
1675 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1676 {
1677 	struct mlxsw_sp_span_respin_work *respin_work =
1678 		container_of(work, struct mlxsw_sp_span_respin_work, work);
1679 
1680 	rtnl_lock();
1681 	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1682 	rtnl_unlock();
1683 	kfree(respin_work);
1684 }
1685 
1686 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1687 {
1688 	struct mlxsw_sp_span_respin_work *respin_work;
1689 
1690 	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1691 	if (!respin_work)
1692 		return;
1693 
1694 	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1695 	respin_work->mlxsw_sp = mlxsw_sp;
1696 
1697 	mlxsw_core_schedule_work(&respin_work->work);
1698 }
1699 
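/* switchdev object addition is transactional: the handler is called once in
 * the prepare phase, where resources are reserved and errors may be returned,
 * and once in the commit phase, which is not allowed to fail.
 * switchdev_trans_ph_prepare() / switchdev_trans_ph_commit() distinguish the
 * two phases below.
 */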
1700 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1701 				 const struct switchdev_obj *obj,
1702 				 struct switchdev_trans *trans)
1703 {
1704 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1705 	const struct switchdev_obj_port_vlan *vlan;
1706 	int err = 0;
1707 
1708 	switch (obj->id) {
1709 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1710 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1711 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
1712 
1713 		if (switchdev_trans_ph_prepare(trans)) {
1714 			/* The event is emitted before the changes are actually
1715 			 * applied to the bridge. Therefore schedule the respin
1716 			 * call for later, so that the respin logic sees the
1717 			 * updated bridge state.
1718 			 */
1719 			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1720 		}
1721 		break;
1722 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1723 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1724 					    SWITCHDEV_OBJ_PORT_MDB(obj),
1725 					    trans);
1726 		break;
1727 	default:
1728 		err = -EOPNOTSUPP;
1729 		break;
1730 	}
1731 
1732 	return err;
1733 }
1734 
1735 static void
1736 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1737 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1738 {
1739 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1740 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1741 
1742 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1743 	if (WARN_ON(!mlxsw_sp_port_vlan))
1744 		return;
1745 
1746 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1747 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1748 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1749 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1750 }
1751 
1752 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1753 				   const struct switchdev_obj_port_vlan *vlan)
1754 {
1755 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1756 	struct net_device *orig_dev = vlan->obj.orig_dev;
1757 	struct mlxsw_sp_bridge_port *bridge_port;
1758 	u16 vid;
1759 
1760 	if (netif_is_bridge_master(orig_dev))
1761 		return -EOPNOTSUPP;
1762 
1763 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1764 	if (WARN_ON(!bridge_port))
1765 		return -EINVAL;
1766 
1767 	if (!bridge_port->bridge_device->vlan_enabled)
1768 		return 0;
1769 
1770 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1771 		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
1772 
1773 	return 0;
1774 }
1775 
1776 static int
1777 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1778 			struct mlxsw_sp_bridge_port *bridge_port,
1779 			struct mlxsw_sp_mid *mid)
1780 {
1781 	struct net_device *dev = mlxsw_sp_port->dev;
1782 	int err;
1783 
1784 	if (bridge_port->bridge_device->multicast_enabled &&
1785 	    !bridge_port->mrouter) {
1786 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1787 		if (err)
1788 			netdev_err(dev, "Unable to remove port from SMID\n");
1789 	}
1790 
1791 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1792 	if (err)
1793 		netdev_err(dev, "Unable to remove MC SFD\n");
1794 
1795 	return err;
1796 }
1797 
1798 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1799 				 const struct switchdev_obj_port_mdb *mdb)
1800 {
1801 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1802 	struct net_device *orig_dev = mdb->obj.orig_dev;
1803 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1804 	struct mlxsw_sp_bridge_device *bridge_device;
1805 	struct net_device *dev = mlxsw_sp_port->dev;
1806 	struct mlxsw_sp_bridge_port *bridge_port;
1807 	struct mlxsw_sp_mid *mid;
1808 	u16 fid_index;
1809 
1810 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1811 	if (!bridge_port)
1812 		return 0;
1813 
1814 	bridge_device = bridge_port->bridge_device;
1815 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1816 							       bridge_device,
1817 							       mdb->vid);
1818 	if (!mlxsw_sp_port_vlan)
1819 		return 0;
1820 
1821 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1822 
1823 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1824 	if (!mid) {
1825 		netdev_err(dev, "Unable to remove port from MC DB\n");
1826 		return -EINVAL;
1827 	}
1828 
1829 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1830 }
1831 
1832 static void
1833 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1834 			       struct mlxsw_sp_bridge_port *bridge_port)
1835 {
1836 	struct mlxsw_sp_bridge_device *bridge_device;
1837 	struct mlxsw_sp_mid *mid, *tmp;
1838 
1839 	bridge_device = bridge_port->bridge_device;
1840 
1841 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1842 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1843 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1844 						mid);
1845 		} else if (bridge_device->multicast_enabled &&
1846 			   bridge_port->mrouter) {
1847 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1848 		}
1849 	}
1850 }
1851 
1852 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1853 				 const struct switchdev_obj *obj)
1854 {
1855 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1856 	int err = 0;
1857 
1858 	switch (obj->id) {
1859 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1860 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1861 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1862 		break;
1863 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1864 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1865 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1866 		break;
1867 	default:
1868 		err = -EOPNOTSUPP;
1869 		break;
1870 	}
1871 
1872 	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1873 
1874 	return err;
1875 }
1876 
1877 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1878 						   u16 lag_id)
1879 {
1880 	struct mlxsw_sp_port *mlxsw_sp_port;
1881 	u64 max_lag_members;
1882 	int i;
1883 
1884 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1885 					     MAX_LAG_MEMBERS);
1886 	for (i = 0; i < max_lag_members; i++) {
1887 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1888 		if (mlxsw_sp_port)
1889 			return mlxsw_sp_port;
1890 	}
1891 	return NULL;
1892 }
1893 
1894 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1895 	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
1896 	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
1897 	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
1898 	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
1899 };
1900 
1901 static int
1902 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1903 				struct mlxsw_sp_bridge_port *bridge_port,
1904 				struct mlxsw_sp_port *mlxsw_sp_port,
1905 				struct netlink_ext_ack *extack)
1906 {
1907 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1908 
1909 	if (is_vlan_dev(bridge_port->dev)) {
1910 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1911 		return -EINVAL;
1912 	}
1913 
1914 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
1915 	if (WARN_ON(!mlxsw_sp_port_vlan))
1916 		return -EINVAL;
1917 
1918 	/* Let VLAN-aware bridge take care of its own VLANs */
1919 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1920 
1921 	return 0;
1922 }
1923 
1924 static void
1925 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1926 				 struct mlxsw_sp_bridge_port *bridge_port,
1927 				 struct mlxsw_sp_port *mlxsw_sp_port)
1928 {
1929 	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
1930 	/* Make sure untagged frames are allowed to ingress */
1931 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
1932 }
1933 
1934 static struct mlxsw_sp_fid *
1935 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1936 			      u16 vid)
1937 {
1938 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1939 
1940 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
1941 }
1942 
1943 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
1944 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
1945 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
1946 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
1947 };
1948 
1949 static bool
1950 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
1951 			   const struct net_device *br_dev)
1952 {
1953 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1954 
1955 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
1956 			    list) {
1957 		if (mlxsw_sp_port_vlan->bridge_port &&
1958 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
1959 		    br_dev)
1960 			return true;
1961 	}
1962 
1963 	return false;
1964 }
1965 
1966 static int
1967 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1968 				struct mlxsw_sp_bridge_port *bridge_port,
1969 				struct mlxsw_sp_port *mlxsw_sp_port,
1970 				struct netlink_ext_ack *extack)
1971 {
1972 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1973 	struct net_device *dev = bridge_port->dev;
1974 	u16 vid;
1975 
1976 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
1977 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1978 	if (WARN_ON(!mlxsw_sp_port_vlan))
1979 		return -EINVAL;
1980 
1981 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
1982 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
1983 		return -EINVAL;
1984 	}
1985 
1986 	/* Port is no longer usable as a router interface */
1987 	if (mlxsw_sp_port_vlan->fid)
1988 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1989 
1990 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1991 }
1992 
1993 static void
1994 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1995 				 struct mlxsw_sp_bridge_port *bridge_port,
1996 				 struct mlxsw_sp_port *mlxsw_sp_port)
1997 {
1998 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1999 	struct net_device *dev = bridge_port->dev;
2000 	u16 vid;
2001 
2002 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2003 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2004 	if (!mlxsw_sp_port_vlan)
2005 		return;
2006 
2007 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2008 }
2009 
2010 static struct mlxsw_sp_fid *
2011 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2012 			      u16 vid)
2013 {
2014 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2015 
2016 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2017 }
2018 
2019 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2020 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2021 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2022 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2023 };
2024 
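/* Bridge port join / leave entry points. The bridge device's ops are one of
 * the two structures above: a VLAN-aware bridge maps each VID to an 802.1Q
 * FID, while a VLAN-unaware bridge maps the whole bridge to a single 802.1D
 * FID keyed by the bridge ifindex. Illustrative user-space setup
 * (hypothetical device names):
 *
 *   ip link add name br0 type bridge vlan_filtering 1   # VLAN-aware
 *   ip link add name br1 type bridge                    # VLAN-unaware
 *   ip link set dev swp1 master br0
 */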
2025 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2026 			      struct net_device *brport_dev,
2027 			      struct net_device *br_dev,
2028 			      struct netlink_ext_ack *extack)
2029 {
2030 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2031 	struct mlxsw_sp_bridge_device *bridge_device;
2032 	struct mlxsw_sp_bridge_port *bridge_port;
2033 	int err;
2034 
2035 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2036 	if (IS_ERR(bridge_port))
2037 		return PTR_ERR(bridge_port);
2038 	bridge_device = bridge_port->bridge_device;
2039 
2040 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2041 					    mlxsw_sp_port, extack);
2042 	if (err)
2043 		goto err_port_join;
2044 
2045 	return 0;
2046 
2047 err_port_join:
2048 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2049 	return err;
2050 }
2051 
2052 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2053 				struct net_device *brport_dev,
2054 				struct net_device *br_dev)
2055 {
2056 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2057 	struct mlxsw_sp_bridge_device *bridge_device;
2058 	struct mlxsw_sp_bridge_port *bridge_port;
2059 
2060 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2061 	if (!bridge_device)
2062 		return;
2063 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2064 	if (!bridge_port)
2065 		return;
2066 
2067 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2068 				       mlxsw_sp_port);
2069 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2070 }
2071 
2072 static void
2073 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2074 			    const char *mac, u16 vid,
2075 			    struct net_device *dev)
2076 {
2077 	struct switchdev_notifier_fdb_info info;
2078 
2079 	info.addr = mac;
2080 	info.vid = vid;
2081 	call_switchdev_notifiers(type, dev, &info.info);
2082 }
2083 
2084 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2085 					    char *sfn_pl, int rec_index,
2086 					    bool adding)
2087 {
2088 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2089 	struct mlxsw_sp_bridge_device *bridge_device;
2090 	struct mlxsw_sp_bridge_port *bridge_port;
2091 	struct mlxsw_sp_port *mlxsw_sp_port;
2092 	enum switchdev_notifier_type type;
2093 	char mac[ETH_ALEN];
2094 	u8 local_port;
2095 	u16 vid, fid;
2096 	bool do_notification = true;
2097 	int err;
2098 
2099 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2100 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2101 	if (!mlxsw_sp_port) {
2102 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2103 		goto just_remove;
2104 	}
2105 
2106 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2107 	if (!mlxsw_sp_port_vlan) {
2108 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2109 		goto just_remove;
2110 	}
2111 
2112 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2113 	if (!bridge_port) {
2114 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2115 		goto just_remove;
2116 	}
2117 
2118 	bridge_device = bridge_port->bridge_device;
2119 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2120 
2121 do_fdb_op:
2122 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2123 				      adding, true);
2124 	if (err) {
2125 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2126 		return;
2127 	}
2128 
2129 	if (!do_notification)
2130 		return;
2131 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2132 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
2133 
2134 	return;
2135 
2136 just_remove:
2137 	adding = false;
2138 	do_notification = false;
2139 	goto do_fdb_op;
2140 }
2141 
2142 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2143 						char *sfn_pl, int rec_index,
2144 						bool adding)
2145 {
2146 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2147 	struct mlxsw_sp_bridge_device *bridge_device;
2148 	struct mlxsw_sp_bridge_port *bridge_port;
2149 	struct mlxsw_sp_port *mlxsw_sp_port;
2150 	enum switchdev_notifier_type type;
2151 	char mac[ETH_ALEN];
2152 	u16 lag_vid = 0;
2153 	u16 lag_id;
2154 	u16 vid, fid;
2155 	bool do_notification = true;
2156 	int err;
2157 
2158 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2159 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2160 	if (!mlxsw_sp_port) {
2161 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2162 		goto just_remove;
2163 	}
2164 
2165 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2166 	if (!mlxsw_sp_port_vlan) {
2167 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2168 		goto just_remove;
2169 	}
2170 
2171 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2172 	if (!bridge_port) {
2173 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2174 		goto just_remove;
2175 	}
2176 
2177 	bridge_device = bridge_port->bridge_device;
2178 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2179 	lag_vid = mlxsw_sp_port_vlan->vid;
2180 
2181 do_fdb_op:
2182 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2183 					  adding, true);
2184 	if (err) {
2185 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2186 		return;
2187 	}
2188 
2189 	if (!do_notification)
2190 		return;
2191 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2192 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
2193 
2194 	return;
2195 
2196 just_remove:
2197 	adding = false;
2198 	do_notification = false;
2199 	goto do_fdb_op;
2200 }
2201 
2202 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2203 					    char *sfn_pl, int rec_index)
2204 {
2205 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2206 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2207 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2208 						rec_index, true);
2209 		break;
2210 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2211 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2212 						rec_index, false);
2213 		break;
2214 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2215 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2216 						    rec_index, true);
2217 		break;
2218 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2219 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2220 						    rec_index, false);
2221 		break;
2222 	}
2223 }
2224 
2225 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2226 {
2227 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2228 
2229 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2230 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2231 }
2232 
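/* Learned and aged-out FDB records are not interrupt driven; they are polled
 * from a delayed work. Each pass queries one batch of SFN records, feeds
 * every record to the handlers above and then re-arms itself using the
 * bridge->fdb_notify.interval period.
 */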
2233 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2234 {
2235 	struct mlxsw_sp_bridge *bridge;
2236 	struct mlxsw_sp *mlxsw_sp;
2237 	char *sfn_pl;
2238 	u8 num_rec;
2239 	int i;
2240 	int err;
2241 
2242 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2243 	if (!sfn_pl)
2244 		return;
2245 
2246 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2247 	mlxsw_sp = bridge->mlxsw_sp;
2248 
2249 	rtnl_lock();
2250 	mlxsw_reg_sfn_pack(sfn_pl);
2251 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2252 	if (err) {
2253 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2254 		goto out;
2255 	}
2256 	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2257 	for (i = 0; i < num_rec; i++)
2258 		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2259 
2260 out:
2261 	rtnl_unlock();
2262 	kfree(sfn_pl);
2263 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2264 }
2265 
2266 struct mlxsw_sp_switchdev_event_work {
2267 	struct work_struct work;
2268 	struct switchdev_notifier_fdb_info fdb_info;
2269 	struct net_device *dev;
2270 	unsigned long event;
2271 };
2272 
2273 static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
2274 {
2275 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2276 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2277 	struct net_device *dev = switchdev_work->dev;
2278 	struct switchdev_notifier_fdb_info *fdb_info;
2279 	struct mlxsw_sp_port *mlxsw_sp_port;
2280 	int err;
2281 
2282 	rtnl_lock();
2283 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2284 	if (!mlxsw_sp_port)
2285 		goto out;
2286 
2287 	switch (switchdev_work->event) {
2288 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2289 		fdb_info = &switchdev_work->fdb_info;
2290 		if (!fdb_info->added_by_user)
2291 			break;
2292 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2293 		if (err)
2294 			break;
2295 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2296 					    fdb_info->addr,
2297 					    fdb_info->vid, dev);
2298 		break;
2299 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2300 		fdb_info = &switchdev_work->fdb_info;
2301 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2302 		break;
2303 	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2304 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2305 		/* These events are only used to potentially update an existing
2306 		 * SPAN mirror.
2307 		 */
2308 		break;
2309 	}
2310 
2311 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2312 
2313 out:
2314 	rtnl_unlock();
2315 	kfree(switchdev_work->fdb_info.addr);
2316 	kfree(switchdev_work);
2317 	dev_put(dev);
2318 }
2319 
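/* FDB add/del events from the bridge layer arrive in atomic context, so the
 * relevant data is copied into a struct mlxsw_sp_switchdev_event_work and
 * processed later in process context by mlxsw_sp_switchdev_event_work(),
 * where RTNL can be taken and the device can be programmed.
 */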
2320 /* Called under rcu_read_lock() */
2321 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
2322 				    unsigned long event, void *ptr)
2323 {
2324 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2325 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
2326 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
2327 	struct net_device *br_dev;
2328 
2329 	/* Tunnel devices are not our uppers, so check their master instead */
2330 	br_dev = netdev_master_upper_dev_get_rcu(dev);
2331 	if (!br_dev)
2332 		return NOTIFY_DONE;
2333 	if (!netif_is_bridge_master(br_dev))
2334 		return NOTIFY_DONE;
2335 	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
2336 		return NOTIFY_DONE;
2337 
2338 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2339 	if (!switchdev_work)
2340 		return NOTIFY_BAD;
2341 
2342 	INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
2343 	switchdev_work->dev = dev;
2344 	switchdev_work->event = event;
2345 
2346 	switch (event) {
2347 	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
2348 	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
2349 	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2350 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2351 		memcpy(&switchdev_work->fdb_info, ptr,
2352 		       sizeof(switchdev_work->fdb_info));
2353 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2354 		if (!switchdev_work->fdb_info.addr)
2355 			goto err_addr_alloc;
2356 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2357 				fdb_info->addr);
2358 		/* Take a reference on the device. This can be either an
2359 		 * upper device containing mlxsw_sp_port or just a
2360 		 * mlxsw_sp_port.
2361 		 */
2362 		dev_hold(dev);
2363 		break;
2364 	default:
2365 		kfree(switchdev_work);
2366 		return NOTIFY_DONE;
2367 	}
2368 
2369 	mlxsw_core_schedule_work(&switchdev_work->work);
2370 
2371 	return NOTIFY_DONE;
2372 
2373 err_addr_alloc:
2374 	kfree(switchdev_work);
2375 	return NOTIFY_BAD;
2376 }
2377 
2378 static struct notifier_block mlxsw_sp_switchdev_notifier = {
2379 	.notifier_call = mlxsw_sp_switchdev_event,
2380 };
2381 
2382 u8
2383 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
2384 {
2385 	return bridge_port->stp_state;
2386 }
2387 
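/* FDB initialization: program the default ageing time, register for FDB
 * events from the bridge layer and start the SFN polling work. The ageing
 * time can later be adjusted from user space, e.g. (illustrative; the value
 * is in hundredths of a second):
 *
 *   ip link set dev br0 type bridge ageing_time 30000
 */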
2388 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
2389 {
2390 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2391 	int err;
2392 
2393 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
2394 	if (err) {
2395 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
2396 		return err;
2397 	}
2398 
2399 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2400 	if (err) {
2401 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
2402 		return err;
2403 	}
2404 
2405 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
2406 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
2407 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2408 	return 0;
2409 }
2410 
2411 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
2412 {
2413 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
2414 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2415 
2416 }
2417 
2418 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
2419 {
2420 	struct mlxsw_sp_bridge *bridge;
2421 
2422 	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
2423 	if (!bridge)
2424 		return -ENOMEM;
2425 	mlxsw_sp->bridge = bridge;
2426 	bridge->mlxsw_sp = mlxsw_sp;
2427 
2428 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
2429 
2430 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
2431 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
2432 
2433 	return mlxsw_sp_fdb_init(mlxsw_sp);
2434 }
2435 
2436 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
2437 {
2438 	mlxsw_sp_fdb_fini(mlxsw_sp);
2439 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
2440 	kfree(mlxsw_sp->bridge);
2441 }
2442 
2443 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
2444 {
2445 	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
2446 }
2447 
2448 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2449 {
2450 }
2451