• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 #include <linux/mlx5/driver.h>
4 #include "vhca_event.h"
5 #include "priv.h"
6 #include "sf.h"
7 #include "mlx5_ifc_vhca_event.h"
8 #include "ecpf.h"
9 #include "mlx5_core.h"
10 #include "eswitch.h"
11 
/* Per-SF slot state tracked by the SF manager. */
struct mlx5_sf_hw {
	u32 usr_sfnum;		/* User-assigned SF number; unique per controller table. */
	u8 allocated: 1;	/* Slot is in use; hw function id handed out. */
	u8 pending_delete: 1;	/* User freed the SF but firmware still holds it;
				 * recycled when the vhca state event arrives.
				 */
};
17 
/* A contiguous range of hardware SF function ids owned by one controller. */
struct mlx5_sf_hwc_table {
	struct mlx5_sf_hw *sfs;	/* Array of max_fn per-SF slots; NULL when unused. */
	int max_fn;		/* Number of slots; 0 disables this table. */
	u16 start_fn_id;	/* Hardware function id corresponding to sfs[0]. */
};
23 
/* Index into mlx5_sf_hw_table.hwc[]; one per-controller table of each kind. */
enum mlx5_sf_hwc_index {
	MLX5_SF_HWC_LOCAL,	/* SFs for the local controller (controller == 0). */
	MLX5_SF_HWC_EXTERNAL,	/* SFs for an external controller (controller != 0). */
	MLX5_SF_HWC_MAX,
};
29 
/* Device-wide SF hardware id allocator state, hung off dev->priv.sf_hw_table. */
struct mlx5_sf_hw_table {
	struct mlx5_core_dev *dev;
	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
	struct notifier_block vhca_nb;	/* Registered for firmware vhca state events. */
	struct mlx5_sf_hwc_table hwc[MLX5_SF_HWC_MAX];
};
36 
37 static struct mlx5_sf_hwc_table *
mlx5_sf_controller_to_hwc(struct mlx5_core_dev * dev,u32 controller)38 mlx5_sf_controller_to_hwc(struct mlx5_core_dev *dev, u32 controller)
39 {
40 	int idx = !!controller;
41 
42 	return &dev->priv.sf_hw_table->hwc[idx];
43 }
44 
/* Translate a software SF index into the hardware function id within
 * the hwc table serving the given controller.
 */
u16 mlx5_sf_sw_to_hw_id(struct mlx5_core_dev *dev, u32 controller, u16 sw_id)
{
	return mlx5_sf_controller_to_hwc(dev, controller)->start_fn_id + sw_id;
}
52 
/* Inverse of mlx5_sf_sw_to_hw_id(): hardware function id back to the
 * software index relative to the table's base function id.
 */
static u16 mlx5_sf_hw_to_sw_id(struct mlx5_sf_hwc_table *hwc, u16 hw_id)
{
	u16 base = hwc->start_fn_id;

	return hw_id - base;
}
57 
58 static struct mlx5_sf_hwc_table *
mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table * table,u16 fn_id)59 mlx5_sf_table_fn_to_hwc(struct mlx5_sf_hw_table *table, u16 fn_id)
60 {
61 	int i;
62 
63 	for (i = 0; i < ARRAY_SIZE(table->hwc); i++) {
64 		if (table->hwc[i].max_fn &&
65 		    fn_id >= table->hwc[i].start_fn_id &&
66 		    fn_id < (table->hwc[i].start_fn_id + table->hwc[i].max_fn))
67 			return &table->hwc[i];
68 	}
69 	return NULL;
70 }
71 
/* Reserve a slot for usr_sfnum in the controller's table.
 * Returns the slot index, -EEXIST if usr_sfnum is already allocated,
 * or -ENOSPC when the table is absent or full.
 */
static int mlx5_sf_hw_table_id_alloc(struct mlx5_sf_hw_table *table, u32 controller,
				     u32 usr_sfnum)
{
	struct mlx5_sf_hwc_table *hwc;
	struct mlx5_sf_hw *sf;
	int free_idx = -1;
	int i;

	hwc = mlx5_sf_controller_to_hwc(table->dev, controller);
	if (!hwc->sfs)
		return -ENOSPC;

	/* Single pass: remember the first free slot while checking every
	 * allocated slot for a duplicate user SF number.
	 */
	for (i = 0; i < hwc->max_fn; i++) {
		sf = &hwc->sfs[i];
		if (!sf->allocated) {
			if (free_idx == -1)
				free_idx = i;
			continue;
		}
		if (sf->usr_sfnum == usr_sfnum)
			return -EEXIST;
	}

	if (free_idx < 0)
		return -ENOSPC;

	hwc->sfs[free_idx].usr_sfnum = usr_sfnum;
	hwc->sfs[free_idx].allocated = true;
	return free_idx;
}
100 
/* Return slot id of the controller's table to the free pool. */
static void mlx5_sf_hw_table_id_free(struct mlx5_sf_hw_table *table, u32 controller, int id)
{
	struct mlx5_sf_hw *sf;

	sf = &mlx5_sf_controller_to_hwc(table->dev, controller)->sfs[id];
	sf->allocated = false;
	sf->pending_delete = false;
}
109 
/* Allocate a hardware SF for the given controller and user SF number.
 * Returns the software id (slot index in the controller's hwc table) on
 * success, or a negative errno: -EOPNOTSUPP when the SF table does not
 * exist, -EEXIST on duplicate usr_sfnum, -ENOSPC when the table is full,
 * or a firmware command error. On any failure after the id was reserved,
 * firmware and table state are unwound in reverse order.
 */
int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr_sfnum)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u16 hw_fn_id;
	int sw_id;
	int err;

	if (!table)
		return -EOPNOTSUPP;

	mutex_lock(&table->table_lock);
	/* Reserve a free slot; also rejects duplicate usr_sfnum. */
	sw_id = mlx5_sf_hw_table_id_alloc(table, controller, usr_sfnum);
	if (sw_id < 0) {
		err = sw_id;
		goto exist_err;
	}

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, sw_id);
	err = mlx5_cmd_alloc_sf(dev, hw_fn_id);
	if (err)
		goto err;

	/* Expose the user-chosen SF number as the vhca software id. */
	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
	if (err)
		goto vhca_err;

	if (controller) {
		/* If this SF is for external controller, SF manager
		 * needs to arm firmware to receive the events.
		 */
		err = mlx5_vhca_event_arm(dev, hw_fn_id);
		if (err)
			goto vhca_err;
	}

	mutex_unlock(&table->table_lock);
	return sw_id;

vhca_err:
	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
err:
	mlx5_sf_hw_table_id_free(table, controller, sw_id);
exist_err:
	mutex_unlock(&table->table_lock);
	return err;
}
156 
/* Immediately free an SF: dealloc the function id in firmware and then
 * recycle the slot. Runs under table_lock to serialize against the vhca
 * event handler. Counterpart of mlx5_sf_hw_table_sf_alloc().
 */
void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u16 hw_fn_id;

	mutex_lock(&table->table_lock);
	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
	mlx5_cmd_dealloc_sf(dev, hw_fn_id);
	mlx5_sf_hw_table_id_free(table, controller, id);
	mutex_unlock(&table->table_lock);
}
168 
mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev * dev,struct mlx5_sf_hwc_table * hwc,int idx)169 static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
170 					 struct mlx5_sf_hwc_table *hwc, int idx)
171 {
172 	mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
173 	hwc->sfs[idx].allocated = false;
174 	hwc->sfs[idx].pending_delete = false;
175 }
176 
/* Free an SF whose driver may still be attached. If firmware reports the
 * vhca is merely ALLOCATED (no driver bound), dealloc it right away;
 * otherwise mark the slot pending_delete so the vhca event handler
 * recycles it once the SF driver detaches. On a query failure the slot is
 * left untouched.
 */
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
	struct mlx5_sf_hwc_table *hwc;
	u16 hw_fn_id;
	u8 state;
	int err;

	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, controller, id);
	hwc = mlx5_sf_controller_to_hwc(dev, controller);
	mutex_lock(&table->table_lock);
	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
	if (err)
		goto err;
	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
	if (state == MLX5_VHCA_STATE_ALLOCATED) {
		/* No driver attached; safe to dealloc and recycle now. */
		mlx5_cmd_dealloc_sf(dev, hw_fn_id);
		hwc->sfs[id].allocated = false;
	} else {
		/* Driver still active; defer recycling to the vhca event. */
		hwc->sfs[id].pending_delete = true;
	}
err:
	mutex_unlock(&table->table_lock);
}
202 
mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev * dev,struct mlx5_sf_hwc_table * hwc)203 static void mlx5_sf_hw_table_hwc_dealloc_all(struct mlx5_core_dev *dev,
204 					     struct mlx5_sf_hwc_table *hwc)
205 {
206 	int i;
207 
208 	for (i = 0; i < hwc->max_fn; i++) {
209 		if (hwc->sfs[i].allocated)
210 			mlx5_sf_hw_table_hwc_sf_free(dev, hwc, i);
211 	}
212 }
213 
/* Free all remaining SFs of both controllers; external table first,
 * then the local one.
 */
static void mlx5_sf_hw_table_dealloc_all(struct mlx5_sf_hw_table *table)
{
	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_EXTERNAL]);
	mlx5_sf_hw_table_hwc_dealloc_all(table->dev, &table->hwc[MLX5_SF_HWC_LOCAL]);
}
219 
/* Populate one hwc table with a zeroed slot array of max_fn entries
 * based at base_id. A zero max_fn is valid and leaves the table empty.
 * Returns 0 on success or -ENOMEM.
 */
static int mlx5_sf_hw_table_hwc_init(struct mlx5_sf_hwc_table *hwc, u16 max_fn, u16 base_id)
{
	struct mlx5_sf_hw *entries;

	if (!max_fn)
		return 0;

	entries = kcalloc(max_fn, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	hwc->sfs = entries;
	hwc->max_fn = max_fn;
	hwc->start_fn_id = base_id;
	return 0;
}
236 
/* Release the slot array; kfree(NULL) is a no-op for a never-initialized
 * (max_fn == 0) table.
 */
static void mlx5_sf_hw_table_hwc_cleanup(struct mlx5_sf_hwc_table *hwc)
{
	kfree(hwc->sfs);
}
241 
/* Allocate and initialize the device's SF hardware table with one hwc
 * table for local SFs and one for external-controller SFs. Quietly does
 * nothing (returns 0) when vhca events are unsupported or when neither
 * controller exposes any SFs. Returns 0 on success or a negative errno;
 * on failure all partially-initialized state is torn down.
 */
int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table;
	u16 max_ext_fn = 0;
	u16 ext_base_id;
	u16 max_fn = 0;
	u16 base_id;
	int err;

	if (!mlx5_vhca_event_supported(dev))
		return 0;

	if (mlx5_sf_supported(dev))
		max_fn = mlx5_sf_max_functions(dev);

	/* External-controller SF range comes from the eswitch. */
	err = mlx5_esw_sf_max_hpf_functions(dev, &max_ext_fn, &ext_base_id);
	if (err)
		return err;

	if (!max_fn && !max_ext_fn)
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->table_lock);
	table->dev = dev;
	dev->priv.sf_hw_table = table;

	base_id = mlx5_sf_start_function_id(dev);
	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_LOCAL], max_fn, base_id);
	if (err)
		goto table_err;

	err = mlx5_sf_hw_table_hwc_init(&table->hwc[MLX5_SF_HWC_EXTERNAL],
					max_ext_fn, ext_base_id);
	if (err)
		goto ext_err;

	mlx5_core_dbg(dev, "SF HW table: max sfs = %d, ext sfs = %d\n", max_fn, max_ext_fn);
	return 0;

ext_err:
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
table_err:
	mutex_destroy(&table->table_lock);
	kfree(table);
	return err;
}
292 
/* Tear down the SF hardware table created by mlx5_sf_hw_table_init().
 * Safe to call when the table was never created (no-op).
 */
void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return;

	mutex_destroy(&table->table_lock);
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_EXTERNAL]);
	mlx5_sf_hw_table_hwc_cleanup(&table->hwc[MLX5_SF_HWC_LOCAL]);
	kfree(table);
}
305 
/* vhca state change notifier. When a vhca transitions back to ALLOCATED
 * (SF driver detached) and the slot was freed by the user while the
 * driver was still attached (pending_delete), complete the deferred
 * dealloc and recycle the hardware id. Always returns 0.
 */
static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	struct mlx5_sf_hwc_table *hwc;
	struct mlx5_sf_hw *sf_hw;
	u16 sw_id;

	if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
		return 0;

	/* Ignore function ids outside any SF range we manage. */
	hwc = mlx5_sf_table_fn_to_hwc(table, event->function_id);
	if (!hwc)
		return 0;

	sw_id = mlx5_sf_hw_to_sw_id(hwc, event->function_id);
	sf_hw = &hwc->sfs[sw_id];

	mutex_lock(&table->table_lock);
	/* SF driver notified through firmware that SF is finally detached.
	 * Hence recycle the sf hardware id for reuse.
	 */
	if (sf_hw->allocated && sf_hw->pending_delete)
		mlx5_sf_hw_table_hwc_sf_free(table->dev, hwc, sw_id);
	mutex_unlock(&table->table_lock);
	return 0;
}
333 
/* Register the vhca state event notifier for deferred SF recycling.
 * Returns 0 when the table does not exist (SFs unsupported) or the
 * result of the notifier registration otherwise.
 */
int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return 0;

	table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
	return mlx5_vhca_event_notifier_register(dev, &table->vhca_nb);
}
344 
/* Counterpart of mlx5_sf_hw_table_create(): unregister the notifier,
 * then dealloc any SFs whose detach event can no longer be delivered.
 */
void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;

	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(dev, &table->vhca_nb);
	/* Dealloc SFs whose firmware event has been missed. */
	mlx5_sf_hw_table_dealloc_all(table);
}
356 
mlx5_sf_hw_table_supported(const struct mlx5_core_dev * dev)357 bool mlx5_sf_hw_table_supported(const struct mlx5_core_dev *dev)
358 {
359 	return !!dev->priv.sf_hw_table;
360 }
361