1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3
4 #include <linux/mlx5/driver.h>
5 #include "eswitch.h"
6 #include "priv.h"
7 #include "sf/dev/dev.h"
8 #include "mlx5_ifc_vhca_event.h"
9 #include "vhca_event.h"
10 #include "ecpf.h"
11
/* Software representation of a sub-function (SF) port.
 * Allocated by mlx5_sf_alloc() and released by mlx5_sf_free() or
 * mlx5_sf_dealloc() depending on its hardware state at removal time.
 */
struct mlx5_sf {
	struct devlink_port dl_port;	/* devlink port enabled for this SF's vport */
	unsigned int port_index;	/* devlink port index; key in table->port_indices */
	u32 controller;			/* controller number this SF belongs to */
	u16 id;				/* software SF id from the SF hw table */
	u16 hw_fn_id;			/* hardware function id of the SF vport */
	u16 hw_state;			/* cached MLX5_VHCA_STATE_* of the function */
};
20
/* Per-device table of SF ports. Created in mlx5_sf_table_init() and
 * destroyed in mlx5_sf_table_cleanup(); usage is gated by refcount,
 * which is armed/disarmed on eswitch mode change events.
 */
struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray port_indices; /* port index based lookup. */
	refcount_t refcount; /* Zero while disabled; taken via mlx5_sf_table_try_get(). */
	struct completion disable_complete; /* Completed when the last reference drops. */
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
	struct notifier_block esw_nb; /* Eswitch mode change notifier. */
	struct notifier_block vhca_nb; /* VHCA state change notifier. */
	u8 ecpu: 1; /* NOTE(review): not referenced in this file — confirm use elsewhere. */
};
31
32 static struct mlx5_sf *
mlx5_sf_lookup_by_index(struct mlx5_sf_table * table,unsigned int port_index)33 mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
34 {
35 return xa_load(&table->port_indices, port_index);
36 }
37
38 static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table * table,unsigned int fn_id)39 mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
40 {
41 unsigned long index;
42 struct mlx5_sf *sf;
43
44 xa_for_each(&table->port_indices, index, sf) {
45 if (sf->hw_fn_id == fn_id)
46 return sf;
47 }
48 return NULL;
49 }
50
mlx5_sf_id_insert(struct mlx5_sf_table * table,struct mlx5_sf * sf)51 static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
52 {
53 return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
54 }
55
/* Remove @sf from the port-index based lookup table. */
static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->port_indices, sf->port_index);
}
60
/* Allocate a software SF and reserve a hardware SF function for it.
 *
 * Validates the controller number, reserves an SF id in the hardware
 * table, derives the hardware function id and devlink port index, and
 * inserts the new entry into the port-index lookup. The SF starts in
 * the ALLOCATED state.
 *
 * Returns the new SF or ERR_PTR(); -EEXIST additionally reports a
 * "duplicate sfnum" message via extack.
 */
static struct mlx5_sf *
mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
	      u32 controller, u32 sfnum, struct netlink_ext_ack *extack)
{
	unsigned int dl_port_index;
	struct mlx5_sf *sf;
	u16 hw_fn_id;
	int id_err;
	int err;

	if (!mlx5_esw_offloads_controller_valid(esw, controller)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid controller number");
		return ERR_PTR(-EINVAL);
	}

	/* On success id_err holds the newly reserved software SF id. */
	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, controller, sfnum);
	if (id_err < 0) {
		err = id_err;
		goto id_err;
	}

	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
	if (!sf) {
		err = -ENOMEM;
		goto alloc_err;
	}
	sf->id = id_err;
	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, controller, sf->id);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
	sf->port_index = dl_port_index;
	sf->hw_fn_id = hw_fn_id;
	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
	sf->controller = controller;

	err = mlx5_sf_id_insert(table, sf);
	if (err)
		goto insert_err;

	return sf;

insert_err:
	kfree(sf);
alloc_err:
	/* Release the reserved hardware SF id. */
	mlx5_sf_hw_table_sf_free(table->dev, controller, id_err);
id_err:
	if (err == -EEXIST)
		NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
	return ERR_PTR(err);
}
110
/* Undo mlx5_sf_alloc(): unlink from the lookup table, release the
 * hardware SF id and free the software object.
 */
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_id_erase(table, sf);
	mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
	kfree(sf);
}
117
mlx5_sf_table_try_get(struct mlx5_core_dev * dev)118 static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
119 {
120 struct mlx5_sf_table *table = dev->priv.sf_table;
121
122 if (!table)
123 return NULL;
124
125 return refcount_inc_not_zero(&table->refcount) ? table : NULL;
126 }
127
mlx5_sf_table_put(struct mlx5_sf_table * table)128 static void mlx5_sf_table_put(struct mlx5_sf_table *table)
129 {
130 if (refcount_dec_and_test(&table->refcount))
131 complete(&table->disable_complete);
132 }
133
/* Map MLX5_VHCA_STATE_* to the devlink function state.
 * ACTIVE and IN_USE report active; all other states report inactive.
 */
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
	if (hw_state == MLX5_VHCA_STATE_ACTIVE ||
	    hw_state == MLX5_VHCA_STATE_IN_USE)
		return DEVLINK_PORT_FN_STATE_ACTIVE;

	return DEVLINK_PORT_FN_STATE_INACTIVE;
}
147
/* Map MLX5_VHCA_STATE_* to the devlink operational state.
 * IN_USE and TEARDOWN_REQUEST mean a driver is attached to the
 * function; all other states report detached.
 */
static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
{
	if (hw_state == MLX5_VHCA_STATE_IN_USE ||
	    hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST)
		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;

	return DEVLINK_PORT_FN_OPSTATE_DETACHED;
}
161
mlx5_sf_is_active(const struct mlx5_sf * sf)162 static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
163 {
164 return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
165 }
166
/* devlink port function state get callback.
 *
 * Reports the SF's state and operational state derived from its cached
 * hardware state. Returns -EOPNOTSUPP when the SF table is unavailable
 * (not in switchdev mode / SF ports disabled) or when the port index
 * does not belong to an SF.
 */
int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
				      enum devlink_port_fn_state *state,
				      enum devlink_port_fn_opstate *opstate,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table)
		return -EOPNOTSUPP;

	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
	if (!sf) {
		err = -EOPNOTSUPP;
		goto sf_err;
	}
	/* hw_state is also written by the vhca event handler; read it
	 * under sf_state_lock for a consistent state/opstate pair.
	 */
	mutex_lock(&table->sf_state_lock);
	*state = mlx5_sf_to_devlink_state(sf->hw_state);
	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}
194
/* Activate an SF by enabling its HCA. Called with sf_state_lock held
 * (see mlx5_sf_state_set()).
 *
 * An already-active SF succeeds immediately. An SF that is not in the
 * ALLOCATED state cannot be activated yet (still attached from a
 * previous activation) and returns -EBUSY.
 */
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
			    struct netlink_ext_ack *extack)
{
	int err;

	if (mlx5_sf_is_active(sf))
		return 0;
	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
		NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
		return -EBUSY;
	}

	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
	if (err)
		return err;

	/* Track the new state; firmware events refine it later. */
	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
	return 0;
}
214
mlx5_sf_deactivate(struct mlx5_core_dev * dev,struct mlx5_sf * sf)215 static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
216 {
217 int err;
218
219 if (!mlx5_sf_is_active(sf))
220 return 0;
221
222 err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
223 if (err)
224 return err;
225
226 sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
227 return 0;
228 }
229
mlx5_sf_state_set(struct mlx5_core_dev * dev,struct mlx5_sf_table * table,struct mlx5_sf * sf,enum devlink_port_fn_state state,struct netlink_ext_ack * extack)230 static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
231 struct mlx5_sf *sf,
232 enum devlink_port_fn_state state,
233 struct netlink_ext_ack *extack)
234 {
235 int err = 0;
236
237 mutex_lock(&table->sf_state_lock);
238 if (state == mlx5_sf_to_devlink_state(sf->hw_state))
239 goto out;
240 if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
241 err = mlx5_sf_activate(dev, sf, extack);
242 else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
243 err = mlx5_sf_deactivate(dev, sf);
244 else
245 err = -EINVAL;
246 out:
247 mutex_unlock(&table->sf_state_lock);
248 return err;
249 }
250
/* devlink port function state set callback.
 *
 * Looks up the SF by devlink port index and applies the requested
 * state change. Returns -EOPNOTSUPP when SF support is unavailable and
 * -ENODEV when the port index does not belong to an SF.
 */
int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
				      enum devlink_port_fn_state state,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
	if (!sf) {
		err = -ENODEV;
		goto out;
	}

	err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
	mlx5_sf_table_put(table);
	return err;
}
277
/* Allocate an SF and enable its eswitch vport.
 *
 * On success stores the devlink port index of the new SF in
 * *new_port_index and returns 0. On failure the SF allocation is fully
 * unwound via mlx5_sf_free().
 */
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
		       const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack,
		       unsigned int *new_port_index)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf *sf;
	int err;

	sf = mlx5_sf_alloc(table, esw, new_attr->controller, new_attr->sfnum, extack);
	if (IS_ERR(sf))
		return PTR_ERR(sf);

	err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, sf->hw_fn_id,
						new_attr->controller, new_attr->sfnum);
	if (err)
		goto esw_err;
	*new_port_index = sf->port_index;
	return 0;

esw_err:
	mlx5_sf_free(table, sf);
	return err;
}
302
/* Validate user-supplied attributes for SF port addition.
 *
 * The driver supports: SF flavour only, driver-assigned port index,
 * a mandatory user-supplied sfnum, external controllers only when this
 * device is an ECPF eswitch manager, and a pfnum matching this PCI
 * function. Returns 0 on success, -EOPNOTSUPP with an extack message
 * otherwise.
 */
static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller &&
	    !mlx5_core_is_ecpf_esw_manager(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}
332
/* devlink port new callback: validate attributes and add an SF port.
 * Holds a table reference for the duration of the addition so that
 * table disable waits for this command to finish.
 */
int mlx5_devlink_sf_port_new(struct devlink *devlink,
			     const struct devlink_port_new_attrs *new_attr,
			     struct netlink_ext_ack *extack,
			     unsigned int *new_port_index)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table;
	int err;

	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
	if (err)
		return err;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
	mlx5_sf_table_put(table);
	return err;
}
356
mlx5_sf_dealloc(struct mlx5_sf_table * table,struct mlx5_sf * sf)357 static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
358 {
359 if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
360 mlx5_sf_free(table, sf);
361 } else if (mlx5_sf_is_active(sf)) {
362 /* Even if its active, it is treated as in_use because by the time,
363 * it is disabled here, it may getting used. So it is safe to
364 * always look for the event to ensure that it is recycled only after
365 * firmware gives confirmation that it is detached by the driver.
366 */
367 mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
368 mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
369 kfree(sf);
370 } else {
371 mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
372 kfree(sf);
373 }
374 }
375
/* devlink port del callback for an SF port.
 *
 * Disables the SF's eswitch vport, unlinks the SF from the lookup
 * table so no new command can find it, and then releases it under
 * sf_state_lock to serialize against the vhca event handler.
 */
int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, port_index);
	if (!sf) {
		err = -ENODEV;
		goto sf_err;
	}

	mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
	mlx5_sf_id_erase(table, sf);

	/* Serialize with the vhca event handler before freeing the SF. */
	mutex_lock(&table->sf_state_lock);
	mlx5_sf_dealloc(table, sf);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}
407
mlx5_sf_state_update_check(const struct mlx5_sf * sf,u8 new_state)408 static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
409 {
410 if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
411 return true;
412
413 if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
414 return true;
415
416 if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
417 new_state == MLX5_VHCA_STATE_ALLOCATED)
418 return true;
419
420 return false;
421 }
422
/* VHCA state change notifier.
 *
 * Firmware notifies when a driver attaches to or detaches from an SF
 * function; record the new state in the SF's cached hw_state when the
 * transition is one we track (see mlx5_sf_state_update_check()).
 * Taking a table reference ensures table disable waits for this
 * handler to finish.
 */
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	/* Re-acquire the table with a reference; bail out if disabled. */
	table = mlx5_sf_table_try_get(table->dev);
	if (!table)
		return 0;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto sf_err;

	/* When driver is attached or detached to a function, an event
	 * notifies such state change.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
sf_err:
	mutex_unlock(&table->sf_state_lock);
	mlx5_sf_table_put(table);
	return 0;
}
450
/* Enable the table: once refcount is set to 1, user commands and vhca
 * events can take references via mlx5_sf_table_try_get(). The
 * completion is (re)initialized first so a later disable can wait on it.
 */
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
	init_completion(&table->disable_complete);
	refcount_set(&table->refcount, 1);
}
456
mlx5_sf_deactivate_all(struct mlx5_sf_table * table)457 static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
458 {
459 struct mlx5_eswitch *esw = table->dev->priv.eswitch;
460 unsigned long index;
461 struct mlx5_sf *sf;
462
463 /* At this point, no new user commands can start and no vhca event can
464 * arrive. It is safe to destroy all user created SFs.
465 */
466 xa_for_each(&table->port_indices, index, sf) {
467 mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
468 mlx5_sf_id_erase(table, sf);
469 mlx5_sf_dealloc(table, sf);
470 }
471 }
472
/* Disable the table: drop the enable-time reference, wait until every
 * concurrent user (command or notifier) has dropped theirs, then tear
 * down any remaining SFs. No-op when the table was never enabled.
 */
static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
	if (!refcount_read(&table->refcount))
		return;

	/* Balances with refcount_set; drop the reference so that new user cmd cannot start
	 * and new vhca event handler cannot run.
	 */
	mlx5_sf_table_put(table);
	wait_for_completion(&table->disable_complete);

	mlx5_sf_deactivate_all(table);
}
486
mlx5_sf_esw_event(struct notifier_block * nb,unsigned long event,void * data)487 static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
488 {
489 struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
490 const struct mlx5_esw_event_info *mode = data;
491
492 switch (mode->new_mode) {
493 case MLX5_ESWITCH_OFFLOADS:
494 mlx5_sf_table_enable(table);
495 break;
496 case MLX5_ESWITCH_NONE:
497 mlx5_sf_table_disable(table);
498 break;
499 default:
500 break;
501 }
502
503 return 0;
504 }
505
mlx5_sf_table_supported(const struct mlx5_core_dev * dev)506 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
507 {
508 return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
509 mlx5_sf_hw_table_supported(dev);
510 }
511
/* Initialize SF support for the device.
 *
 * Quietly succeeds (returns 0) when SF support is not available.
 * Otherwise allocates the table and registers the eswitch mode and
 * VHCA state notifiers. The table starts disabled (refcount 0) until
 * the eswitch enters switchdev mode (see mlx5_sf_esw_event()).
 */
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table;
	int err;

	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->sf_state_lock);
	table->dev = dev;
	xa_init(&table->port_indices);
	dev->priv.sf_table = table;
	refcount_set(&table->refcount, 0);
	table->esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
	if (err)
		goto reg_err;

	table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
	if (err)
		goto vhca_err;

	return 0;

vhca_err:
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
	/* Unwind in reverse order and clear the published pointer. */
	mutex_destroy(&table->sf_state_lock);
	kfree(table);
	dev->priv.sf_table = NULL;
	return err;
}
549
/* Tear down SF support: unregister notifiers and free the table.
 * By now the table must be disabled (refcount zero) and hold no SFs;
 * the WARN_ONs flag a leak of either.
 */
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
	WARN_ON(refcount_read(&table->refcount));
	mutex_destroy(&table->sf_state_lock);
	WARN_ON(!xa_empty(&table->port_indices));
	kfree(table);
}
564