// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include <linux/mlx5/eswitch.h>
#include <linux/err.h>
#include "dr_types.h"

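/* SW steering is supported by a domain if the relevant table type has
 * native SW ownership, or has v2 SW ownership on a device whose STE
 * format is no newer than ConnectX-6 Dx.
 */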
#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type)	\
	((dmn)->info.caps.dmn_type##_sw_owner ||	\
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation, this
	 * recalculation is needed due to a HW bug.
	 */
	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
					  sizeof(dmn->cache.recalc_cs_ft[0]),
					  GFP_KERNEL);
	if (!dmn->cache.recalc_cs_ft)
		return -ENOMEM;

	return 0;
}

static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
{
	int i;

	for (i = 0; i < dmn->info.caps.num_vports; i++) {
		if (!dmn->cache.recalc_cs_ft[i])
			continue;

		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
	}

	kfree(dmn->cache.recalc_cs_ft);
}

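/* Return the RX ICM address of the per-vport FW flow table used for
 * checksum recalculation, creating and caching the table on first use.
 */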
int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
					      u32 vport_num,
					      u64 *rx_icm_addr)
{
	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;

	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
	if (!recalc_cs_ft) {
		/* Table not in cache, need to allocate a new one */
		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
		if (!recalc_cs_ft)
			return -EINVAL;

		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
	}

	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;

	return 0;
}

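/* Allocate the device resources the domain needs: an STE context
 * matching the device STE format, a PD, a UAR page, ICM pools for STEs
 * and modify-header actions, and the send ring used to write steering
 * entries to ICM.
 */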
static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
	int ret;

	dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
	if (!dmn->ste_ctx) {
		mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
		return -EOPNOTSUPP;
	}

	ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d\n", ret);
		return ret;
	}

	dmn->uar = mlx5_get_uars_page(dmn->mdev);
	if (IS_ERR(dmn->uar)) {
		mlx5dr_err(dmn, "Couldn't allocate UAR\n");
		ret = PTR_ERR(dmn->uar);
		goto clean_pd;
	}

	dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!dmn->ste_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get icm memory\n");
		ret = -ENOMEM;
		goto clean_uar;
	}

	dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
	if (!dmn->action_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get action icm memory\n");
		ret = -ENOMEM;
		goto free_ste_icm_pool;
	}

	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
		goto free_action_icm_pool;
	}

	return 0;

free_action_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
free_ste_icm_pool:
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
clean_uar:
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
clean_pd:
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);

	return ret;
}

static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
	mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
	mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}

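/* Query a single vport's RX/TX ICM addresses and GVMI, and fill its
 * entry in the domain's vport caps array.
 */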
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
				 bool other_vport,
				 u16 vport_number)
{
	struct mlx5dr_cmd_vport_cap *vport_caps;
	int ret;

	vport_caps = &dmn->info.caps.vports_caps[vport_number];

	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
						 other_vport,
						 vport_number,
						 &vport_caps->icm_address_rx,
						 &vport_caps->icm_address_tx);
	if (ret)
		return ret;

	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
				    other_vport,
				    vport_number,
				    &vport_caps->vport_gvmi);
	if (ret)
		return ret;

	vport_caps->num = vport_number;
	vport_caps->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

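/* Query all e-switch vports. The last entry in the caps array is the
 * wire (uplink) port, whose ICM addresses come from the e-switch caps
 * rather than a per-vport query.
 */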
static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
	struct mlx5dr_cmd_vport_cap *wire_vport;
	int vport;
	int ret;

	/* Query vports (except wire vport) */
	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
		ret = dr_domain_query_vport(dmn, !!vport, vport);
		if (ret)
			return ret;
	}

	/* Last vport is the wire port */
	wire_vport = &dmn->info.caps.vports_caps[vport];
	wire_vport->num = MLX5_VPORT_UPLINK;
	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
	wire_vport->vport_gvmi = 0;
	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;

	return 0;
}

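/* Query FDB-related caps: e-switch SW ownership bits, RX/TX drop ICM
 * addresses, and the per-vport caps. Only valid when this function is
 * the e-switch manager.
 */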
static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
				    struct mlx5dr_domain *dmn)
{
	int ret;

	if (!dmn->info.caps.eswitch_manager)
		return -EOPNOTSUPP;

	ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
	if (ret)
		return ret;

	dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
	dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
					     sizeof(dmn->info.caps.vports_caps[0]),
					     GFP_KERNEL);
	if (!dmn->info.caps.vports_caps)
		return -ENOMEM;

	ret = dr_domain_query_vports(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)\n", ret);
		goto free_vports_caps;
	}

	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;

	return 0;

free_vports_caps:
	kfree(dmn->info.caps.vports_caps);
	dmn->info.caps.vports_caps = NULL;
	return ret;
}

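/* Query device and FDB caps, then fill the per-domain-type info:
 * default and drop ICM addresses for the RX/TX sides, and whether SW
 * steering is supported at all for this domain type.
 */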
static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
			       struct mlx5dr_domain *dmn)
{
	struct mlx5dr_cmd_vport_cap *vport_cap;
	int ret;

	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
		mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
		return -EOPNOTSUPP;
	}

	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);

	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
	if (ret)
		return ret;

	ret = dr_domain_query_fdb_caps(mdev, dmn);
	if (ret)
		return ret;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
			return -EOPNOTSUPP;

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		if (!dmn->info.caps.eswitch_manager)
			return -EOPNOTSUPP;

		if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
			return -EOPNOTSUPP;

		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Failed to get esw manager vport\n");
			return -ENOENT;
		}

		dmn->info.supp_sw_steering = true;
		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
		dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
		dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
		dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
		break;
	default:
		mlx5dr_err(dmn, "Invalid domain\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
	kfree(dmn->info.caps.vports_caps);
}

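/* Create a SW steering domain of the given type (NIC RX, NIC TX or
 * FDB): allocate the domain, query caps, and set up device resources
 * and the per-vport FW FT cache. Returns NULL on failure.
 *
 * Typical caller flow (a sketch, not taken from this file):
 *	dmn = mlx5dr_domain_create(mdev, MLX5DR_DOMAIN_TYPE_FDB);
 *	if (!dmn)
 *		return -ENOMEM;
 *	...
 *	mlx5dr_domain_destroy(dmn);
 */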
struct mlx5dr_domain *
mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
{
	struct mlx5dr_domain *dmn;
	int ret;

	if (type > MLX5DR_DOMAIN_TYPE_FDB)
		return NULL;

	dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
	if (!dmn)
		return NULL;

	dmn->mdev = mdev;
	dmn->type = type;
	refcount_set(&dmn->refcount, 1);
	mutex_init(&dmn->info.rx.mutex);
	mutex_init(&dmn->info.tx.mutex);

	if (dr_domain_caps_init(mdev, dmn)) {
		mlx5dr_err(dmn, "Failed to init domain caps\n");
		goto free_domain;
	}

	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
		goto uninit_caps;
	}

	/* Allocate resources */
	ret = dr_domain_init_resources(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to init domain resources\n");
		goto uninit_caps;
	}

	ret = dr_domain_init_cache(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to init domain cache\n");
		goto uninit_resources;
	}

	return dmn;

uninit_resources:
	dr_domain_uninit_resources(dmn);
uninit_caps:
	dr_domain_caps_uninit(dmn);
free_domain:
	kfree(dmn);
	return NULL;
}

/* Ensure the device steering tables are in sync with updates made by
 * SW insertion: the SW flag drains the domain's send ring, the HW flag
 * issues a FW steering-sync command.
 */
int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
{
	int ret = 0;

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
		mlx5dr_domain_lock(dmn);
		ret = mlx5dr_send_ring_force_drain(dmn);
		mlx5dr_domain_unlock(dmn);
		if (ret) {
			mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
				   flags, ret);
			return ret;
		}
	}

	if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
		ret = mlx5dr_cmd_sync_steering(dmn->mdev);

	return ret;
}

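/* Destroy the domain. Fails with -EBUSY while other objects still hold
 * a reference; steering is synced first so HW no longer touches the
 * resources being freed.
 */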
int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
{
	if (refcount_read(&dmn->refcount) > 1)
		return -EBUSY;

	/* make sure resources are not used by the hardware */
	mlx5dr_cmd_sync_steering(dmn->mdev);
	dr_domain_uninit_cache(dmn);
	dr_domain_uninit_resources(dmn);
	dr_domain_caps_uninit(dmn);
	mutex_destroy(&dmn->info.tx.mutex);
	mutex_destroy(&dmn->info.rx.mutex);
	kfree(dmn);
	return 0;
}

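/* Set or replace the domain's peer domain, taking a reference on the
 * new peer and dropping the reference on the previous one if it was
 * set. Passing NULL clears the peer.
 */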
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
			    struct mlx5dr_domain *peer_dmn)
{
	mlx5dr_domain_lock(dmn);

	if (dmn->peer_dmn)
		refcount_dec(&dmn->peer_dmn->refcount);

	dmn->peer_dmn = peer_dmn;

	if (dmn->peer_dmn)
		refcount_inc(&dmn->peer_dmn->refcount);

	mlx5dr_domain_unlock(dmn);
}