// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd */

#include <linux/mlx5/driver.h>
#include "eswitch.h"
#include "priv.h"
#include "sf/dev/dev.h"
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"
#include "ecpf.h"
#define CREATE_TRACE_POINTS
#include "diag/sf_tracepoint.h"

struct mlx5_sf {
	struct devlink_port dl_port;
	unsigned int port_index;
	u32 controller;
	u16 id;
	u16 hw_fn_id;
	u16 hw_state;
};

struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray port_indices; /* port index based lookup. */
	refcount_t refcount;
	struct completion disable_complete;
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
	struct notifier_block esw_nb;
	struct notifier_block vhca_nb;
	u8 ecpu: 1;
};

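/* SFs are tracked in an xarray keyed by devlink port index; lookups by
 * hardware function id (used by the vhca event handler) walk the array.
 */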
static struct mlx5_sf *
mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
{
	return xa_load(&table->port_indices, port_index);
}

static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
	unsigned long index;
	struct mlx5_sf *sf;

	xa_for_each(&table->port_indices, index, sf) {
		if (sf->hw_fn_id == fn_id)
			return sf;
	}
	return NULL;
}

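/* Publish the SF in the port-index xarray so user commands and notifier
 * handlers can find it; xa_insert() fails if the index is already taken.
 */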
static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
}

static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->port_indices, sf->port_index);
}

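/* Reserve a hardware function id for (controller, sfnum), derive the
 * devlink port index from it and publish the new SF in the table.
 * Returns the SF on success or an ERR_PTR on failure.
 */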
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exists. Choose different sfnum");
	return ERR_PTR(err);
}

static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_id_erase(table, sf);
	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
	trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
	kfree(sf);
}

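/* The table refcount gates user commands and notifier handlers: it is set
 * to 1 when the eswitch enters switchdev mode and dropped on leaving it,
 * so try_get() fails once teardown has begun.
 */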
static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return NULL;

	return refcount_inc_not_zero(&table->refcount) ? table : NULL;
}

static void mlx5_sf_table_put(struct mlx5_sf_table *table)
{
	if (refcount_dec_and_test(&table->refcount))
		complete(&table->disable_complete);
}

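/* Map the firmware vhca state onto the devlink port function state:
 * ACTIVE and IN_USE count as active, everything else as inactive.
 */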
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_ACTIVE:
	case MLX5_VHCA_STATE_IN_USE:
		return DEVLINK_PORT_FN_STATE_ACTIVE;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
	default:
		return DEVLINK_PORT_FN_STATE_INACTIVE;
	}
}

static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	switch (hw_state) {
	case MLX5_VHCA_STATE_IN_USE:
	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
	case MLX5_VHCA_STATE_INVALID:
	case MLX5_VHCA_STATE_ALLOCATED:
	case MLX5_VHCA_STATE_ACTIVE:
	default:
		return DEVLINK_PORT_FN_OPSTATE_DETACHED;
	}
}

static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
{
	return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
}

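/* Devlink callback reporting an SF port's function state and operational
 * state, derived from the hw_state cached under sf_state_lock.
 */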
int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
				      enum devlink_port_fn_state *state,
				      enum devlink_port_fn_opstate *opstate,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table)
		return -EOPNOTSUPP;

	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
	if (!sf) {
		err = -EOPNOTSUPP;
		goto sf_err;
	}
	mutex_lock(&table->sf_state_lock);
	*state = mlx5_sf_to_devlink_state(sf->hw_state);
	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}

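/* Enable the SF's hardware function. Activation is only legal from the
 * ALLOCATED state; a deactivated SF that the firmware still reports as
 * attached returns -EBUSY until the detach event arrives.
 */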
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
			    struct netlink_ext_ack *extack)
{
	int err;

	if (mlx5_sf_is_active(sf))
		return 0;
	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
		NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
		return -EBUSY;
	}

	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
	trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
{
	int err;

	if (!mlx5_sf_is_active(sf))
		return 0;

	err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
	trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
	return 0;
}

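/* Apply a user requested state change under sf_state_lock so it cannot
 * race with the vhca event handler updating hw_state.
 */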
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
			     struct mlx5_sf *sf,
			     enum devlink_port_fn_state state,
			     struct netlink_ext_ack *extack)
{
	int err = 0;

	mutex_lock(&table->sf_state_lock);
	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
		goto out;
	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
		err = mlx5_sf_activate(dev, sf, extack);
	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
		err = mlx5_sf_deactivate(dev, sf);
	else
		err = -EINVAL;
out:
	mutex_unlock(&table->sf_state_lock);
	return err;
}

int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
				      enum devlink_port_fn_state state,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
	if (!sf) {
		err = -ENODEV;
		goto out;
	}

	err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
	mlx5_sf_table_put(table);
	return err;
}

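/* Create an SF port: allocate the SF and enable its vport in the eswitch;
 * on eswitch failure the SF is freed again.
 */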
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
		       const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack,
		       unsigned int *new_port_index)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf *sf;
	int err;

	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
	if (IS_ERR(sf))
		return PTR_ERR(sf);

	err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
						new_attr->controller, new_attr->sfnum);
	if (err)
		goto esw_err;
	*new_port_index = sf->port_index;
	trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
	return 0;

esw_err:
	mlx5_sf_free(table, sf);
	return err;
}

static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}

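/* Devlink 'port add' callback for SF ports. With the iproute2 devlink tool
 * this is reached by, for example (syntax per the mlx5 documentation):
 *   devlink port add pci/0000:06:00.0 flavour pcisf pfnum 0 sfnum 88
 */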
int mlx5_devlink_sf_port_new(struct devlink *devlink,
			     const struct devlink_port_new_attrs *new_attr,
			     struct netlink_ext_ack *extack,
			     unsigned int *new_port_index)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table;
	int err;

	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
	if (err)
		return err;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
	mlx5_sf_table_put(table);
	return err;
}

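/* Release an SF according to its state: a never-activated SF is freed
 * immediately; otherwise it is disabled first and its hardware id is
 * recycled only after the firmware detach event (see comment below).
 */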
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
		mlx5_sf_free(table, sf);
	} else if (mlx5_sf_is_active(sf)) {
		/* Even if it is active, treat it as in_use: by the time it is
		 * disabled here, it may be getting used. So always wait for the
		 * event to ensure it is recycled only after firmware confirms
		 * that the driver has detached from it.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	} else {
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
		kfree(sf);
	}
}

int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, port_index);
	if (!sf) {
		err = -ENODEV;
		goto sf_err;
	}

	mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
	mlx5_sf_id_erase(table, sf);

	mutex_lock(&table->sf_state_lock);
	mlx5_sf_dealloc(table, sf);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}

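/* Accept only the firmware-initiated transitions that reflect driver
 * attach (ACTIVE -> IN_USE), driver detach (IN_USE -> ACTIVE) and
 * teardown completion (TEARDOWN_REQUEST -> ALLOCATED).
 */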
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
		return true;

	if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
	    new_state == MLX5_VHCA_STATE_ALLOCATED)
		return true;

	return false;
}

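/* Notifier invoked on firmware vhca state change events; updates the
 * cached hw_state when the reported transition is one the driver tracks.
 */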
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	table = mlx5_sf_table_try_get(table->dev);
	if (!table)
		return 0;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto sf_err;

	/* When a driver attaches to or detaches from a function, an event
	 * notifies of such a state change.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
	trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
				   sf->hw_fn_id, sf->hw_state);
sf_err:
	mutex_unlock(&table->sf_state_lock);
	mlx5_sf_table_put(table);
	return 0;
}

static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
	init_completion(&table->disable_complete);
	refcount_set(&table->refcount, 1);
}

static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
{
	struct mlx5_eswitch *esw = table->dev->priv.eswitch;
	unsigned long index;
	struct mlx5_sf *sf;

	/* At this point, no new user commands can start and no vhca event can
	 * arrive. It is safe to destroy all user created SFs.
	 */
	xa_for_each(&table->port_indices, index, sf) {
		mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
		mlx5_sf_id_erase(table, sf);
		mlx5_sf_dealloc(table, sf);
	}
}

static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
	if (!refcount_read(&table->refcount))
		return;

	/* Balances with refcount_set(); drop the reference so that no new user
	 * command can start and no new vhca event handler can run.
	 */
	mlx5_sf_table_put(table);
	wait_for_completion(&table->disable_complete);

	mlx5_sf_deactivate_all(table);
}

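/* SF ports are available only in switchdev mode: enable the table when the
 * eswitch enters offloads mode and tear it down on the switch to legacy.
 */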
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
	const struct mlx5_esw_event_info *mode = data;

	switch (mode->new_mode) {
	case MLX5_ESWITCH_OFFLOADS:
		mlx5_sf_table_enable(table);
		break;
	case MLX5_ESWITCH_LEGACY:
		mlx5_sf_table_disable(table);
		break;
	default:
		break;
	}

	return 0;
}

static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
{
	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
	       mlx5_sf_hw_table_supported(dev);
}

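/* Create the SF table and register for eswitch mode changes and vhca state
 * change events; devices without eswitch manager or SF support skip SF
 * port handling entirely.
 */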
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table;
	int err;

	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->sf_state_lock);
	table->dev = dev;
	xa_init(&table->port_indices);
	dev->priv.sf_table = table;
	refcount_set(&table->refcount, 0);
	table->esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
	if (err)
		goto reg_err;

	table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
	if (err)
		goto vhca_err;

	return 0;

vhca_err:
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
	mutex_destroy(&table->sf_state_lock);
	kfree(table);
	dev->priv.sf_table = NULL;
	return err;
}

void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
	WARN_ON(refcount_read(&table->refcount));
	mutex_destroy(&table->sf_state_lock);
	WARN_ON(!xa_empty(&table->port_indices));
	kfree(table);
}
572