// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2021 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/switchdev.h>
#include "bridge.h"
#include "esw/bridge.h"
#include "en_rep.h"

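/* Period, in milliseconds, of the delayed work that revalidates offloaded
 * bridge state (see mlx5_esw_bridge_update_work() below).
 */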
#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000

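/* FDB events arrive on the atomic switchdev notifier chain, so the actual
 * offload add/remove is deferred to the bridge workqueue via this wrapper.
 */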
struct mlx5_bridge_switchdev_fdb_work {
        struct work_struct work;
        struct switchdev_notifier_fdb_info fdb_info;
        struct net_device *dev;
        struct mlx5_esw_bridge_offloads *br_offloads;
        bool add;
};

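/* Check whether the netdev belongs to the eswitch instance that owns the
 * bridge offloads.
 */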
static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        return esw == priv->mdev->priv.eswitch;
}

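/* Two mlx5 devices on the same physical NIC report the same system image
 * GUID; this is used to recognize representors of a peer eswitch on the
 * same hardware.
 */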
static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev, *esw_mdev;
        u64 system_guid, esw_system_guid;

        mdev = priv->mdev;
        esw_mdev = esw->dev;

        system_guid = mlx5_query_nic_system_image_guid(mdev);
        esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev);

        return system_guid == esw_system_guid;
}

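/* For a bond master, find a lower representor that belongs to the given
 * eswitch and runs LAG in shared-FDB mode.
 */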
static struct net_device *
mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw)
{
        struct net_device *lower;
        struct list_head *iter;

        netdev_for_each_lower_dev(dev, lower, iter) {
                struct mlx5_core_dev *mdev;
                struct mlx5e_priv *priv;

                if (!mlx5e_eswitch_rep(lower))
                        continue;

                priv = netdev_priv(lower);
                mdev = priv->mdev;
                if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw))
                        return lower;
        }

        return NULL;
}

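/* Resolve a netdev (possibly a bond master) to its eswitch representor and
 * return its vport number and the vhca_id of the owning eswitch. Returns
 * NULL if the netdev cannot be offloaded by this eswitch.
 */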
static struct net_device *
mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
                                          u16 *vport_num, u16 *esw_owner_vhca_id)
{
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *priv;

        if (netif_is_lag_master(dev))
                dev = mlx5_esw_bridge_lag_rep_get(dev, esw);

        if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw))
                return NULL;

        priv = netdev_priv(dev);
        rpriv = priv->ppriv;
        *vport_num = rpriv->rep->vport;
        *esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id);
        return dev;
}

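/* As above, but also recurse through lower devices (e.g. a VLAN device on
 * top of a representor or bond) until a representor is found.
 */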
static struct net_device *
mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw,
                                                u16 *vport_num, u16 *esw_owner_vhca_id)
{
        struct net_device *lower_dev;
        struct list_head *iter;

        if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev))
                return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num,
                                                                 esw_owner_vhca_id);

        netdev_for_each_lower_dev(dev, lower_dev, iter) {
                struct net_device *rep;

                if (netif_is_bridge_master(lower_dev))
                        continue;

                rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num,
                                                                      esw_owner_vhca_id);
                if (rep)
                        return rep;
        }

        return NULL;
}

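/* A bridge port is local if its representor belongs to this eswitch; a bond
 * master additionally has to be the shared-FDB LAG master.
 */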
static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep,
                                     struct mlx5_eswitch *esw)
{
        struct mlx5_core_dev *mdev;
        struct mlx5e_priv *priv;

        if (!mlx5_esw_bridge_dev_same_esw(rep, esw))
                return false;

        priv = netdev_priv(rep);
        mdev = priv->mdev;
        if (netif_is_lag_master(dev))
                return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev);
        return true;
}

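/* NETDEV_CHANGEUPPER handler: (un)link the vport to/from the offloaded
 * bridge, either as a local port or as a peer port on the same hardware.
 */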
static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    netdev_nb);
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev, *rep;
        struct mlx5_eswitch *esw = br_offloads->esw;
        u16 vport_num, esw_owner_vhca_id;
        struct netlink_ext_ack *extack;
        int ifindex = upper->ifindex;
        int err = 0;

        if (!netif_is_bridge_master(upper))
                return 0;

        rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
        if (!rep)
                return 0;

        extack = netdev_notifier_info_to_extack(&info->info);

        if (mlx5_esw_bridge_is_local(dev, rep, esw))
                err = info->linking ?
                        mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
                                                   br_offloads, extack) :
                        mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
                                                     br_offloads, extack);
        else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
                err = info->linking ?
                        mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
                                                        br_offloads, extack) :
                        mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
                                                          br_offloads, extack);

        return err;
}

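/* NETDEV_PRECHANGEUPPER handler: reject enslaving a bond of representors to
 * a bridge unless LAG is already active (-EAGAIN otherwise) and in
 * shared-FDB mode (-EOPNOTSUPP otherwise).
 */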
static int
mlx5_esw_bridge_changeupper_validate_netdev(void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        struct net_device *upper = info->upper_dev;
        struct net_device *lower;
        struct list_head *iter;

        if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev))
                return 0;

        netdev_for_each_lower_dev(dev, lower, iter) {
                struct mlx5_core_dev *mdev;
                struct mlx5e_priv *priv;

                if (!mlx5e_eswitch_rep(lower))
                        continue;

                priv = netdev_priv(lower);
                mdev = priv->mdev;
                if (!mlx5_lag_is_active(mdev))
                        return -EAGAIN;
                if (!mlx5_lag_is_shared_fdb(mdev))
                        return -EOPNOTSUPP;
        }

        return 0;
}

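/* Netdevice notifier: validate bridge enslavement before it happens and
 * apply it once it has.
 */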
static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
                                                unsigned long event, void *ptr)
{
        int err = 0;

        switch (event) {
        case NETDEV_PRECHANGEUPPER:
                err = mlx5_esw_bridge_changeupper_validate_netdev(ptr);
                break;

        case NETDEV_CHANGEUPPER:
                err = mlx5_esw_bridge_port_changeupper(nb, ptr);
                break;
        }

        return notifier_from_errno(err);
}

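/* SWITCHDEV_PORT_OBJ_ADD handler; only port VLAN objects are offloaded. */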
static int
mlx5_esw_bridge_port_obj_add(struct net_device *dev,
                             struct switchdev_notifier_port_obj_info *port_obj_info,
                             struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
        const struct switchdev_obj *obj = port_obj_info->obj;
        const struct switchdev_obj_port_vlan *vlan;
        u16 vport_num, esw_owner_vhca_id;
        int err;

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                return 0;

        port_obj_info->handled = true;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid,
                                                    vlan->flags, br_offloads, extack);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return err;
}

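/* SWITCHDEV_PORT_OBJ_DEL handler, the mirror of the add path above. */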
static int
mlx5_esw_bridge_port_obj_del(struct net_device *dev,
                             struct switchdev_notifier_port_obj_info *port_obj_info,
                             struct mlx5_esw_bridge_offloads *br_offloads)
{
        const struct switchdev_obj *obj = port_obj_info->obj;
        const struct switchdev_obj_port_vlan *vlan;
        u16 vport_num, esw_owner_vhca_id;

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                return 0;

        port_obj_info->handled = true;

        switch (obj->id) {
        case SWITCHDEV_OBJ_ID_PORT_VLAN:
                vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
                mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

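/* SWITCHDEV_PORT_ATTR_SET handler: validate bridge port flags and apply
 * ageing time and VLAN filtering configuration to the offloaded bridge.
 */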
static int
mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
                                  struct switchdev_notifier_port_attr_info *port_attr_info,
                                  struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info);
        const struct switchdev_attr *attr = port_attr_info->attr;
        u16 vport_num, esw_owner_vhca_id;
        int err = 0;

        if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                             &esw_owner_vhca_id))
                return 0;

        port_attr_info->handled = true;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
                if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
                        NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
                        err = -EINVAL;
                }
                break;
        case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
                err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id,
                                                      attr->u.ageing_time, br_offloads);
                break;
        case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
                err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id,
                                                         attr->u.vlan_filtering, br_offloads);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

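/* Blocking switchdev notifier; dispatches port object and attribute events. */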
static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb,
                                          unsigned long event, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    nb_blk);
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        int err;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
                err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads);
                break;
        case SWITCHDEV_PORT_OBJ_DEL:
                err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads);
                break;
        case SWITCHDEV_PORT_ATTR_SET:
                err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);
                break;
        default:
                err = 0;
        }

        return notifier_from_errno(err);
}

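/* Release the netdev reference and memory held by a queued FDB work item. */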
static void
mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
{
        dev_put(fdb_work->dev);
        kfree(fdb_work->fdb_info.addr);
        kfree(fdb_work);
}

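/* Workqueue handler that applies a deferred FDB add/remove under RTNL. */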
static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
{
        struct mlx5_bridge_switchdev_fdb_work *fdb_work =
                container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
        struct switchdev_notifier_fdb_info *fdb_info =
                &fdb_work->fdb_info;
        struct mlx5_esw_bridge_offloads *br_offloads =
                fdb_work->br_offloads;
        struct net_device *dev = fdb_work->dev;
        u16 vport_num, esw_owner_vhca_id;

        rtnl_lock();

        if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num,
                                                       &esw_owner_vhca_id))
                goto out;

        if (fdb_work->add)
                mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                           fdb_info);
        else
                mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                           fdb_info);

out:
        rtnl_unlock();
        mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
}

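/* Allocate and fill a deferred FDB work item. GFP_ATOMIC is used because
 * this runs from the atomic switchdev notifier; the MAC address is
 * deep-copied since the notifier's fdb_info does not outlive the callback.
 */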
static struct mlx5_bridge_switchdev_fdb_work *
mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
                                        struct switchdev_notifier_fdb_info *fdb_info,
                                        struct mlx5_esw_bridge_offloads *br_offloads)
{
        struct mlx5_bridge_switchdev_fdb_work *work;
        u8 *addr;

        work = kzalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return ERR_PTR(-ENOMEM);

        INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
        memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));

        addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
        if (!addr) {
                kfree(work);
                return ERR_PTR(-ENOMEM);
        }
        ether_addr_copy(addr, fdb_info->addr);
        work->fdb_info.addr = addr;

        dev_hold(dev);
        work->dev = dev;
        work->br_offloads = br_offloads;
        work->add = add;
        return work;
}

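/* Atomic switchdev notifier. Attribute sets are handled inline; FDB
 * add/remove events on bridged representors are deferred to the workqueue.
 * SWITCHDEV_FDB_ADD_TO_BRIDGE only refreshes the activity state of an
 * already-offloaded entry (see mlx5_esw_bridge_fdb_update_used()).
 */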
static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
                                           unsigned long event, void *ptr)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    nb);
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        struct switchdev_notifier_fdb_info *fdb_info;
        struct mlx5_bridge_switchdev_fdb_work *work;
        struct mlx5_eswitch *esw = br_offloads->esw;
        struct switchdev_notifier_info *info = ptr;
        u16 vport_num, esw_owner_vhca_id;
        struct net_device *upper, *rep;

        if (event == SWITCHDEV_PORT_ATTR_SET) {
                int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads);

                return notifier_from_errno(err);
        }

        upper = netdev_master_upper_dev_get_rcu(dev);
        if (!upper)
                return NOTIFY_DONE;
        if (!netif_is_bridge_master(upper))
                return NOTIFY_DONE;

        rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id);
        if (!rep)
                return NOTIFY_DONE;

        switch (event) {
        case SWITCHDEV_FDB_ADD_TO_BRIDGE:
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);
                mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads,
                                                fdb_info);
                break;
        case SWITCHDEV_FDB_DEL_TO_BRIDGE:
                /* only handle the event on peers */
                if (mlx5_esw_bridge_is_local(dev, rep, esw))
                        break;
                fallthrough;
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                fdb_info = container_of(info,
                                        struct switchdev_notifier_fdb_info,
                                        info);

                work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
                                                               event == SWITCHDEV_FDB_ADD_TO_DEVICE,
                                                               fdb_info,
                                                               br_offloads);
                if (IS_ERR(work)) {
                        WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
                                  PTR_ERR(work));
                        return notifier_from_errno(PTR_ERR(work));
                }

                queue_work(br_offloads->wq, &work->work);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

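/* Periodic work: revalidate offloaded bridge state under RTNL and re-arm. */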
static void mlx5_esw_bridge_update_work(struct work_struct *work)
{
        struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
                                                                    struct mlx5_esw_bridge_offloads,
                                                                    update_work.work);

        rtnl_lock();
        mlx5_esw_bridge_update(br_offloads);
        rtnl_unlock();

        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
}

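/* Set up bridge offloads for the eswitch: notifiers plus the periodic
 * update work. On failure everything is unwound and bridge offloads are
 * simply left disabled.
 */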
void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
{
        struct mlx5_esw_bridge_offloads *br_offloads;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw =
                mdev->priv.eswitch;
        int err;

        rtnl_lock();
        br_offloads = mlx5_esw_bridge_init(esw);
        rtnl_unlock();
        if (IS_ERR(br_offloads)) {
                esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
                return;
        }

        br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
        if (!br_offloads->wq) {
                esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
                goto err_alloc_wq;
        }

        br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
        err = register_switchdev_notifier(&br_offloads->nb);
        if (err) {
                esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
                goto err_register_swdev;
        }

        br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
        err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
        if (err) {
                esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
                goto err_register_swdev_blk;
        }

        br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
        err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
        if (err) {
                esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
                         err);
                goto err_register_netdev;
        }
        INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
        queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
                           msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
        return;

err_register_netdev:
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
err_register_swdev_blk:
        unregister_switchdev_notifier(&br_offloads->nb);
err_register_swdev:
        destroy_workqueue(br_offloads->wq);
err_alloc_wq:
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        rtnl_unlock();
}

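/* Tear everything down in reverse order of mlx5e_rep_bridge_init(). */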
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5_esw_bridge_offloads *br_offloads;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw =
                mdev->priv.eswitch;

        br_offloads = esw->br_offloads;
        if (!br_offloads)
                return;

        cancel_delayed_work_sync(&br_offloads->update_work);
        unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
        unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
        unregister_switchdev_notifier(&br_offloads->nb);
        destroy_workqueue(br_offloads->wq);
        rtnl_lock();
        mlx5_esw_bridge_cleanup(esw);
        rtnl_unlock();
}