// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

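/* SW steering is supported on a given domain type when the matching
 * sw_owner capability is set, or when sw_owner_v2 is set and the device
 * steering format version is at most ConnectX-6 Dx.
 */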
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per-vport cached FW FT for checksum recalculation; this
	 * recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

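/* Return the RX ICM address of the per-vport recalc-CS flow table,
 * creating the table and caching it on first use for that vport.
 */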
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

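/* Allocate the domain's device resources: a PD, a UAR page, the STE and
 * modify-action ICM pools, and the send ring used to write steering
 * entries. On failure, everything allocated so far is released in
 * reverse order.
 */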
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d\n", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (IS_ERR(dmn->uar)) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = PTR_ERR(dmn->uar);
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 bool other_vport,
				 u16 vport_number)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = &dmn->info.caps.vports_caps[vport_number];

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

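/* Query the steering caps of all eswitch vports. The last entry in
 * vports_caps is reserved for the wire (uplink) port, whose ICM addresses
 * come from the previously queried eswitch caps.
 */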
static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn, !!vport, vport);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = WIRE_PORT;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)\n", ret);
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

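/* Query device and eswitch caps and derive the per-domain-type defaults:
 * STE types, default and drop ICM addresses, and whether SW steering is
 * supported for the requested domain type.
 */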
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -EOPNOTSUPP;

		dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
		dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX;
		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Failed to get esw manager vport\n");
			return -ENOENT;
		}

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

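/* Typical caller flow (illustrative sketch; the steering rules themselves
 * are built through the mlx5dr_table/matcher/rule/action API, elided here):
 *
 *	struct mlx5dr_domain *dmn;
 *
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		return -ENOMEM;
 *
 *	... create tables, matchers and rules on the domain ...
 *
 *	mlx5dr_domain_sync(dmn, MLX5DR_DOMAIN_SYNC_FLAGS_SW |
 *			   MLX5DR_DOMAIN_SYNC_FLAGS_HW);
 *	mlx5dr_domain_destroy(dmn);
 */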
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed to init domain, no caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to init domain resources\n");
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to initialize domain cache\n");
		goto uninit_resources;
	}

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure synchronization of the device steering tables with updates made
 * by SW insertion.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

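/* Replace the domain's peer reference under the domain lock. Refcounts on
 * the old and new peer keep a referenced peer domain from being destroyed
 * (mlx5dr_domain_destroy() returns -EBUSY while the refcount is elevated).
 */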
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}