1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3
4 #include <linux/mlx5/driver.h>
5 #include "eswitch.h"
6 #include "priv.h"
7 #include "sf/dev/dev.h"
8 #include "mlx5_ifc_vhca_event.h"
9 #include "vhca_event.h"
10 #include "ecpf.h"
11 #define CREATE_TRACE_POINTS
12 #include "diag/sf_tracepoint.h"
13
/* One sub-function (SF) instance, represented to userspace as a devlink port. */
struct mlx5_sf {
	struct mlx5_devlink_port dl_port; /* embedded devlink port representation */
	unsigned int port_index; /* key into table->port_indices xarray */
	u32 controller; /* controller number that owns this SF */
	u16 id; /* software id within the SF hw table */
	u16 hw_fn_id; /* hardware function (vport) id */
	u16 hw_state; /* MLX5_VHCA_STATE_*; serialized by table->sf_state_lock */
};
22
/* Per-device SF table, reachable via dev->priv.sf_table. */
struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray port_indices; /* port index based lookup. */
	refcount_t refcount; /* Gates user commands and vhca events against table disable. */
	struct completion disable_complete; /* Completed when refcount drops to zero. */
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
	struct notifier_block esw_nb; /* Eswitch mode-change notifier. */
	struct notifier_block vhca_nb; /* VHCA state-change notifier. */
};
32
33 static struct mlx5_sf *
mlx5_sf_lookup_by_index(struct mlx5_sf_table * table,unsigned int port_index)34 mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
35 {
36 return xa_load(&table->port_indices, port_index);
37 }
38
39 static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table * table,unsigned int fn_id)40 mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
41 {
42 unsigned long index;
43 struct mlx5_sf *sf;
44
45 xa_for_each(&table->port_indices, index, sf) {
46 if (sf->hw_fn_id == fn_id)
47 return sf;
48 }
49 return NULL;
50 }
51
mlx5_sf_id_insert(struct mlx5_sf_table * table,struct mlx5_sf * sf)52 static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
53 {
54 return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
55 }
56
mlx5_sf_id_erase(struct mlx5_sf_table * table,struct mlx5_sf * sf)57 static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
58 {
59 xa_erase(&table->port_indices, sf->port_index);
60 }
61
/* Allocate and register a new SF.
 * Reserves an id in the SF hw table for (controller, sfnum), derives the
 * hardware function id and devlink port index from that id, and publishes
 * the SF in the port-index lookup xarray.
 * Returns the new SF on success or an ERR_PTR() on failure; extack carries
 * a user-visible message for the known failure modes.
 */
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	/* On success, id_err holds the newly allocated SF id (>= 0). */
	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	/* All error paths funnel through here; a duplicate sfnum surfaces
	 * as -EEXIST, which warrants an actionable message.
	 */
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
	return ERR_PTR(err);
}
111
mlx5_sf_free(struct mlx5_sf_table * table,struct mlx5_sf * sf)112 static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
113 {
114 mlx5_sf_id_erase(table, sf);
115 mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
116 trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
117 kfree(sf);
118 }
119
mlx5_sf_table_try_get(struct mlx5_core_dev * dev)120 static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
121 {
122 struct mlx5_sf_table *table = dev->priv.sf_table;
123
124 if (!table)
125 return NULL;
126
127 return refcount_inc_not_zero(&table->refcount) ? table : NULL;
128 }
129
mlx5_sf_table_put(struct mlx5_sf_table * table)130 static void mlx5_sf_table_put(struct mlx5_sf_table *table)
131 {
132 if (refcount_dec_and_test(&table->refcount))
133 complete(&table->disable_complete);
134 }
135
/* Map a MLX5_VHCA_STATE_* value to the devlink port function state:
 * ACTIVE and IN_USE count as active, everything else as inactive.
 */
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	bool active = hw_state == MLX5_VHCA_STATE_ACTIVE ||
		      hw_state == MLX5_VHCA_STATE_IN_USE;

	return active ? DEVLINK_PORT_FN_STATE_ACTIVE : DEVLINK_PORT_FN_STATE_INACTIVE;
}
149
/* Map a MLX5_VHCA_STATE_* value to the devlink operational state:
 * IN_USE and TEARDOWN_REQUEST mean a driver is attached, anything else
 * is reported as detached.
 */
static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	bool attached = hw_state == MLX5_VHCA_STATE_IN_USE ||
			hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST;

	return attached ? DEVLINK_PORT_FN_OPSTATE_ATTACHED :
			  DEVLINK_PORT_FN_OPSTATE_DETACHED;
}
163
mlx5_sf_is_active(const struct mlx5_sf * sf)164 static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
165 {
166 return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
167 }
168
mlx5_devlink_sf_port_fn_state_get(struct devlink_port * dl_port,enum devlink_port_fn_state * state,enum devlink_port_fn_opstate * opstate,struct netlink_ext_ack * extack)169 int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
170 enum devlink_port_fn_state *state,
171 enum devlink_port_fn_opstate *opstate,
172 struct netlink_ext_ack *extack)
173 {
174 struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
175 struct mlx5_sf_table *table;
176 struct mlx5_sf *sf;
177 int err = 0;
178
179 table = mlx5_sf_table_try_get(dev);
180 if (!table)
181 return -EOPNOTSUPP;
182
183 sf = mlx5_sf_lookup_by_index(table, dl_port->index);
184 if (!sf) {
185 err = -EOPNOTSUPP;
186 goto sf_err;
187 }
188 mutex_lock(&table->sf_state_lock);
189 *state = mlx5_sf_to_devlink_state(sf->hw_state);
190 *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
191 mutex_unlock(&table->sf_state_lock);
192 sf_err:
193 mlx5_sf_table_put(table);
194 return err;
195 }
196
mlx5_sf_activate(struct mlx5_core_dev * dev,struct mlx5_sf * sf,struct netlink_ext_ack * extack)197 static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
198 struct netlink_ext_ack *extack)
199 {
200 int err;
201
202 if (mlx5_sf_is_active(sf))
203 return 0;
204 if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
205 NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
206 return -EBUSY;
207 }
208
209 err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
210 if (err)
211 return err;
212
213 sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
214 trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
215 return 0;
216 }
217
mlx5_sf_deactivate(struct mlx5_core_dev * dev,struct mlx5_sf * sf)218 static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
219 {
220 int err;
221
222 if (!mlx5_sf_is_active(sf))
223 return 0;
224
225 err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
226 if (err)
227 return err;
228
229 sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
230 trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
231 return 0;
232 }
233
mlx5_sf_state_set(struct mlx5_core_dev * dev,struct mlx5_sf_table * table,struct mlx5_sf * sf,enum devlink_port_fn_state state,struct netlink_ext_ack * extack)234 static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
235 struct mlx5_sf *sf,
236 enum devlink_port_fn_state state,
237 struct netlink_ext_ack *extack)
238 {
239 int err = 0;
240
241 mutex_lock(&table->sf_state_lock);
242 if (state == mlx5_sf_to_devlink_state(sf->hw_state))
243 goto out;
244 if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
245 err = mlx5_sf_activate(dev, sf, extack);
246 else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
247 err = mlx5_sf_deactivate(dev, sf);
248 else
249 err = -EINVAL;
250 out:
251 mutex_unlock(&table->sf_state_lock);
252 return err;
253 }
254
mlx5_devlink_sf_port_fn_state_set(struct devlink_port * dl_port,enum devlink_port_fn_state state,struct netlink_ext_ack * extack)255 int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
256 enum devlink_port_fn_state state,
257 struct netlink_ext_ack *extack)
258 {
259 struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
260 struct mlx5_sf_table *table;
261 struct mlx5_sf *sf;
262 int err;
263
264 table = mlx5_sf_table_try_get(dev);
265 if (!table) {
266 NL_SET_ERR_MSG_MOD(extack,
267 "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
268 return -EOPNOTSUPP;
269 }
270 sf = mlx5_sf_lookup_by_index(table, dl_port->index);
271 if (!sf) {
272 err = -ENODEV;
273 goto out;
274 }
275
276 err = mlx5_sf_state_set(dev, table, sf, state, extack);
277 out:
278 mlx5_sf_table_put(table);
279 return err;
280 }
281
mlx5_sf_add(struct mlx5_core_dev * dev,struct mlx5_sf_table * table,const struct devlink_port_new_attrs * new_attr,struct netlink_ext_ack * extack,struct devlink_port ** dl_port)282 static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
283 const struct devlink_port_new_attrs *new_attr,
284 struct netlink_ext_ack *extack,
285 struct devlink_port **dl_port)
286 {
287 struct mlx5_eswitch *esw = dev->priv.eswitch;
288 struct mlx5_sf *sf;
289 int err;
290
291 sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
292 if (IS_ERR(sf))
293 return PTR_ERR(sf);
294
295 err = mlx5_eswitch_load_sf_vport(esw, sf->hw_fn_id, MLX5_VPORT_UC_ADDR_CHANGE,
296 &sf->dl_port, new_attr->controller, new_attr->sfnum);
297 if (err)
298 goto esw_err;
299 *dl_port = &sf->dl_port.dl_port;
300 trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
301 return 0;
302
303 esw_err:
304 mlx5_sf_free(table, sf);
305 return err;
306 }
307
308 static int
mlx5_sf_new_check_attr(struct mlx5_core_dev * dev,const struct devlink_port_new_attrs * new_attr,struct netlink_ext_ack * extack)309 mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
310 struct netlink_ext_ack *extack)
311 {
312 if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
313 NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
314 return -EOPNOTSUPP;
315 }
316 if (new_attr->port_index_valid) {
317 NL_SET_ERR_MSG_MOD(extack,
318 "Driver does not support user defined port index assignment");
319 return -EOPNOTSUPP;
320 }
321 if (!new_attr->sfnum_valid) {
322 NL_SET_ERR_MSG_MOD(extack,
323 "User must provide unique sfnum. Driver does not support auto assignment");
324 return -EOPNOTSUPP;
325 }
326 if (new_attr->controller_valid && new_attr->controller &&
327 !mlx5_core_is_ecpf_esw_manager(dev)) {
328 NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
329 return -EOPNOTSUPP;
330 }
331 if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
332 NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
333 return -EOPNOTSUPP;
334 }
335 return 0;
336 }
337
mlx5_devlink_sf_port_new(struct devlink * devlink,const struct devlink_port_new_attrs * new_attr,struct netlink_ext_ack * extack,struct devlink_port ** dl_port)338 int mlx5_devlink_sf_port_new(struct devlink *devlink,
339 const struct devlink_port_new_attrs *new_attr,
340 struct netlink_ext_ack *extack,
341 struct devlink_port **dl_port)
342 {
343 struct mlx5_core_dev *dev = devlink_priv(devlink);
344 struct mlx5_sf_table *table;
345 int err;
346
347 err = mlx5_sf_new_check_attr(dev, new_attr, extack);
348 if (err)
349 return err;
350
351 table = mlx5_sf_table_try_get(dev);
352 if (!table) {
353 NL_SET_ERR_MSG_MOD(extack,
354 "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
355 return -EOPNOTSUPP;
356 }
357 err = mlx5_sf_add(dev, table, new_attr, extack, dl_port);
358 mlx5_sf_table_put(table);
359 return err;
360 }
361
mlx5_sf_dealloc(struct mlx5_sf_table * table,struct mlx5_sf * sf)362 static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
363 {
364 if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
365 mlx5_sf_free(table, sf);
366 } else if (mlx5_sf_is_active(sf)) {
367 /* Even if its active, it is treated as in_use because by the time,
368 * it is disabled here, it may getting used. So it is safe to
369 * always look for the event to ensure that it is recycled only after
370 * firmware gives confirmation that it is detached by the driver.
371 */
372 mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
373 mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
374 kfree(sf);
375 } else {
376 mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
377 kfree(sf);
378 }
379 }
380
mlx5_devlink_sf_port_del(struct devlink * devlink,struct devlink_port * dl_port,struct netlink_ext_ack * extack)381 int mlx5_devlink_sf_port_del(struct devlink *devlink,
382 struct devlink_port *dl_port,
383 struct netlink_ext_ack *extack)
384 {
385 struct mlx5_core_dev *dev = devlink_priv(devlink);
386 struct mlx5_eswitch *esw = dev->priv.eswitch;
387 struct mlx5_sf_table *table;
388 struct mlx5_sf *sf;
389 int err = 0;
390
391 table = mlx5_sf_table_try_get(dev);
392 if (!table) {
393 NL_SET_ERR_MSG_MOD(extack,
394 "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
395 return -EOPNOTSUPP;
396 }
397 sf = mlx5_sf_lookup_by_index(table, dl_port->index);
398 if (!sf) {
399 err = -ENODEV;
400 goto sf_err;
401 }
402
403 mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
404 mlx5_sf_id_erase(table, sf);
405
406 mutex_lock(&table->sf_state_lock);
407 mlx5_sf_dealloc(table, sf);
408 mutex_unlock(&table->sf_state_lock);
409 sf_err:
410 mlx5_sf_table_put(table);
411 return err;
412 }
413
mlx5_sf_state_update_check(const struct mlx5_sf * sf,u8 new_state)414 static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
415 {
416 if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
417 return true;
418
419 if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
420 return true;
421
422 if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
423 new_state == MLX5_VHCA_STATE_ALLOCATED)
424 return true;
425
426 return false;
427 }
428
mlx5_sf_vhca_event(struct notifier_block * nb,unsigned long opcode,void * data)429 static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
430 {
431 struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
432 const struct mlx5_vhca_state_event *event = data;
433 bool update = false;
434 struct mlx5_sf *sf;
435
436 table = mlx5_sf_table_try_get(table->dev);
437 if (!table)
438 return 0;
439
440 mutex_lock(&table->sf_state_lock);
441 sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
442 if (!sf)
443 goto sf_err;
444
445 /* When driver is attached or detached to a function, an event
446 * notifies such state change.
447 */
448 update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
449 if (update)
450 sf->hw_state = event->new_vhca_state;
451 trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
452 sf->hw_fn_id, sf->hw_state);
453 sf_err:
454 mutex_unlock(&table->sf_state_lock);
455 mlx5_sf_table_put(table);
456 return 0;
457 }
458
/* Open the table for use: arm the disable completion before setting the
 * refcount to 1, since the refcount gates try_get() callers.
 */
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
	init_completion(&table->disable_complete);
	refcount_set(&table->refcount, 1);
}
464
mlx5_sf_deactivate_all(struct mlx5_sf_table * table)465 static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
466 {
467 struct mlx5_eswitch *esw = table->dev->priv.eswitch;
468 unsigned long index;
469 struct mlx5_sf *sf;
470
471 /* At this point, no new user commands can start and no vhca event can
472 * arrive. It is safe to destroy all user created SFs.
473 */
474 xa_for_each(&table->port_indices, index, sf) {
475 mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
476 mlx5_sf_id_erase(table, sf);
477 mlx5_sf_dealloc(table, sf);
478 }
479 }
480
/* Shut the table down: drop the enable-time reference, wait until every
 * in-flight user command / vhca event releases its reference, then
 * destroy all remaining SFs. No-op when the table was never enabled.
 */
static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
	if (!refcount_read(&table->refcount))
		return;

	/* Balances with refcount_set; drop the reference so that new user cmd cannot start
	 * and new vhca event handler cannot run.
	 */
	mlx5_sf_table_put(table);
	wait_for_completion(&table->disable_complete);

	mlx5_sf_deactivate_all(table);
}
494
mlx5_sf_esw_event(struct notifier_block * nb,unsigned long event,void * data)495 static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
496 {
497 struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
498 const struct mlx5_esw_event_info *mode = data;
499
500 switch (mode->new_mode) {
501 case MLX5_ESWITCH_OFFLOADS:
502 mlx5_sf_table_enable(table);
503 break;
504 case MLX5_ESWITCH_LEGACY:
505 mlx5_sf_table_disable(table);
506 break;
507 default:
508 break;
509 }
510
511 return 0;
512 }
513
mlx5_sf_table_supported(const struct mlx5_core_dev * dev)514 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
515 {
516 return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
517 mlx5_sf_hw_table_supported(dev);
518 }
519
mlx5_sf_table_init(struct mlx5_core_dev * dev)520 int mlx5_sf_table_init(struct mlx5_core_dev *dev)
521 {
522 struct mlx5_sf_table *table;
523 int err;
524
525 if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
526 return 0;
527
528 table = kzalloc(sizeof(*table), GFP_KERNEL);
529 if (!table)
530 return -ENOMEM;
531
532 mutex_init(&table->sf_state_lock);
533 table->dev = dev;
534 xa_init(&table->port_indices);
535 dev->priv.sf_table = table;
536 refcount_set(&table->refcount, 0);
537 table->esw_nb.notifier_call = mlx5_sf_esw_event;
538 err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
539 if (err)
540 goto reg_err;
541
542 table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
543 err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
544 if (err)
545 goto vhca_err;
546
547 return 0;
548
549 vhca_err:
550 mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
551 reg_err:
552 mutex_destroy(&table->sf_state_lock);
553 kfree(table);
554 dev->priv.sf_table = NULL;
555 return err;
556 }
557
mlx5_sf_table_cleanup(struct mlx5_core_dev * dev)558 void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
559 {
560 struct mlx5_sf_table *table = dev->priv.sf_table;
561
562 if (!table)
563 return;
564
565 mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
566 mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
567 WARN_ON(refcount_read(&table->refcount));
568 mutex_destroy(&table->sf_state_lock);
569 WARN_ON(!xa_empty(&table->port_indices));
570 kfree(table);
571 }
572