1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7 #include <linux/if_vlan.h>
8
9 #include "en.h"
10 #include "lib/aso.h"
11 #include "lib/crypto.h"
12 #include "en_accel/macsec.h"
13
/* Midpoint of the 32-bit lower-PN space; used to decide whether the lower
 * half of an extended packet number has crossed into the upper range
 * (EPN window "overlap" tracking, see update_macsec_epn()).
 */
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
/* Size in bytes of the firmware macsec_aso context. */
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)

/* Bitmask of ASO events that can be armed in the device. */
enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),	/* EPN (extended packet number) event */
};

enum {
	/* NOTE(review): presumably a dword offset into the ASO packet
	 * counters used when removing a flow — confirm against firmware spec.
	 */
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
24
/* Identifies one MACsec object when talking to the ASO. */
struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;
	u32 obj_id;		/* firmware MACsec object id */
	u8 idx;
};

enum {
	/* ASO mode selector; see struct mlx5e_macsec_aso_in::mode */
	MLX5_MACSEC_EPN,
};

/* Result of an ASO query: which events are armed and the mode parameter
 * (e.g. the current packet number) read back from the device.
 */
struct mlx5e_macsec_aso_out {
	u8 event_arm;
	u32 mode_param;
};

/* Input for an ASO operation on a given MACsec object. */
struct mlx5e_macsec_aso_in {
	u8 mode;
	u32 obj_id;
};

/* Software mirror of the extended-packet-number state programmed into HW. */
struct mlx5e_macsec_epn_state {
	u32 epn_msb;		/* upper 32 bits of the 64-bit PN */
	u8 epn_enabled;
	u8 overlap;		/* 1 when the lower PN half is in the upper range */
};

/* Deferred handling of a device ASO event (e.g. EPN rollover), executed on
 * the macsec workqueue.
 */
struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;		/* MACsec object the event refers to */
};
57
/* Driver state for one TX or RX security association. */
struct mlx5e_macsec_sa {
	bool active;
	u8 assoc_num;
	u32 macsec_obj_id;	/* firmware MACsec object backing this SA */
	u32 enc_key_id;		/* DEK created via mlx5_create_encryption_key() */
	u32 next_pn;
	sci_t sci;
	ssci_t ssci;		/* short SCI; only meaningful when EPN is on */
	salt_t salt;		/* XPN salt; only meaningful when EPN is on */

	union mlx5_macsec_rule *macsec_rule;	/* installed steering rule, or NULL */
	struct rcu_head rcu_head;
	struct mlx5e_macsec_epn_state epn_state;
};

struct mlx5e_macsec_rx_sc;
/* Entry stored in mlx5e_macsec::sc_xarray, mapping a driver-allocated
 * fs_id to its RX SC.
 */
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;
	struct mlx5e_macsec_rx_sc *rx_sc;
};

/* Driver state for one RX secure channel. */
struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
	struct list_head rx_sc_list_element;
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;	/* carries the SCI to the stack on RX */
	struct rcu_head rcu_head;
};

/* DMA-able UMR buffer holding the ASO context, plus its mapping and mkey. */
struct mlx5e_macsec_umr {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	dma_addr_t dma_addr;
	u32 mkey;
};

struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;	/* protection domain the UMR mkey is created in */
};
105
/* Per-MACsec-netdev offload context; one per offloaded macsec interface. */
struct mlx5e_macsec_device {
	const struct net_device *netdev;	/* the macsec net_device */
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
	struct list_head macsec_rx_sc_list_head;
	unsigned char *dev_addr;		/* copy of the device MAC address */
	struct list_head macsec_device_list_element;
};

/* Top-level MACsec offload state, one per mlx5e priv. */
struct mlx5e_macsec {
	struct list_head macsec_device_list_head;
	int num_of_devices;
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	struct notifier_block nb;	/* device event notifier */
	struct workqueue_struct *wq;	/* async (EPN) event handling */
};

/* Attributes used to build the firmware MACsec object for one SA. */
struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;
	u32 next_pn;
	__be64 sci;
	u32 enc_key_id;
	bool encrypt;
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;		/* valid only when epn_state.epn_enabled */
	__be32 ssci;		/* valid only when epn_state.epn_enabled */
	bool replay_protect;
	u32 replay_window;
};

/* Parameters of an ASO control operation (conditions, offsets, data/mask). */
struct mlx5_aso_ctrl_param {
	u8 data_mask_mode;
	u8 condition_0_operand;
	u8 condition_1_operand;
	u8 condition_0_offset;
	u8 condition_1_offset;
	u8 data_offset;
	u8 condition_operand;
	u32 condition_0_data;
	u32 condition_0_mask;
	u32 condition_1_data;
	u32 condition_1_mask;
	u64 bitwise_data;
	u64 data_mask;
};
159
mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)160 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
161 {
162 struct mlx5e_macsec_umr *umr;
163 struct device *dma_device;
164 dma_addr_t dma_addr;
165 int err;
166
167 umr = kzalloc(sizeof(*umr), GFP_KERNEL);
168 if (!umr) {
169 err = -ENOMEM;
170 return err;
171 }
172
173 dma_device = mlx5_core_dma_dev(mdev);
174 dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
175 err = dma_mapping_error(dma_device, dma_addr);
176 if (err) {
177 mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
178 goto out_dma;
179 }
180
181 err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
182 if (err) {
183 mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
184 goto out_mkey;
185 }
186
187 umr->dma_addr = dma_addr;
188
189 aso->umr = umr;
190
191 return 0;
192
193 out_mkey:
194 dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
195 out_dma:
196 kfree(umr);
197 return err;
198 }
199
/* Undo mlx5e_macsec_aso_reg_mr(): destroy the mkey covering the UMR buffer,
 * then unmap it from DMA and free the UMR structure. The mkey is destroyed
 * before the mapping it covers is torn down.
 */
static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
{
	struct mlx5e_macsec_umr *umr = aso->umr;

	mlx5_core_destroy_mkey(mdev, umr->mkey);
	dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
	kfree(umr);
}
208
macsec_set_replay_protection(struct mlx5_macsec_obj_attrs * attrs,void * aso_ctx)209 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
210 {
211 u8 window_sz;
212
213 if (!attrs->replay_protect)
214 return 0;
215
216 switch (attrs->replay_window) {
217 case 256:
218 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
219 break;
220 case 128:
221 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
222 break;
223 case 64:
224 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
225 break;
226 case 32:
227 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
228 break;
229 default:
230 return -EINVAL;
231 }
232 MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
233 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
234
235 return 0;
236 }
237
/* Create a MACsec general object in firmware for one SA.
 *
 * Fills the object and its embedded ASO context from @attrs: encryption key,
 * confidentiality, next PN, and either EPN state (SSCI + salt) or the plain
 * SCI. TX objects use the ASO in PN-increment mode; RX objects optionally get
 * replay protection.
 *
 * On success stores the new object id in *macsec_obj_id and returns 0;
 * returns a negative errno otherwise.
 */
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		/* In EPN mode the object's sci field carries the SSCI. */
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		/* Copy the 96-bit salt as three 32-bit words in reversed
		 * word order for the device layout.
		 */
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		/* TX: the ASO increments the packet number per packet. */
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}
300
/* Destroy the firmware MACsec object with id @macsec_obj_id.
 * Best-effort: the command status is intentionally not checked.
 */
static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
312
/* Remove the steering rule installed for @sa, if any, and clear the SA's
 * rule pointer. No-op when no rule is installed.
 */
static void mlx5e_macsec_cleanup_sa_fs(struct mlx5e_macsec *macsec,
				       struct mlx5e_macsec_sa *sa, bool is_tx,
				       struct net_device *netdev, u32 fs_id)
{
	int action;

	if (!sa->macsec_rule)
		return;

	if (is_tx)
		action = MLX5_ACCEL_MACSEC_ACTION_ENCRYPT;
	else
		action = MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	mlx5_macsec_fs_del_rule(macsec->mdev->macsec_fs, sa->macsec_rule, action, netdev,
				fs_id);
	sa->macsec_rule = NULL;
}
327
/* Full SA teardown: remove the steering rule (if present) and then destroy
 * the firmware MACsec object backing the SA.
 */
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa, bool is_tx,
				    struct net_device *netdev, u32 fs_id)
{
	mlx5e_macsec_cleanup_sa_fs(macsec, sa, is_tx, netdev, fs_id);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
}
335
/* Install the steering rule for @sa (encrypt rule on TX, decrypt on RX) and
 * record it in the SA. For RX, *fs_id identifies the SC in the datapath.
 * Returns 0 on success, -ENOMEM if the rule could not be added.
 */
static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
				   struct mlx5e_macsec_sa *sa, bool encrypt,
				   bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5_macsec_fs *fs = priv->mdev->macsec_fs;
	union mlx5_macsec_rule *rule;
	struct mlx5_macsec_rule_attrs attrs = {
		.macsec_obj_id = sa->macsec_obj_id,
		.sci = sa->sci,
		.assoc_num = sa->assoc_num,
		.action = is_tx ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				  MLX5_ACCEL_MACSEC_ACTION_DECRYPT,
	};

	rule = mlx5_macsec_fs_add_rule(fs, ctx, &attrs, fs_id);
	if (!rule)
		return -ENOMEM;

	sa->macsec_rule = rule;

	return 0;
}
359
/* Create the firmware object for @sa and, if the SA is active, install its
 * steering rule. If rule installation fails the object is destroyed again
 * so nothing leaks.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
				struct mlx5e_macsec_sa *sa,
				bool encrypt, bool is_tx, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_macsec_obj_attrs obj_attrs;
	int err;

	obj_attrs.next_pn = sa->next_pn;
	obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
	obj_attrs.enc_key_id = sa->enc_key_id;
	obj_attrs.encrypt = encrypt;
	obj_attrs.aso_pdn = macsec->aso.pdn;
	obj_attrs.epn_state = sa->epn_state;

	/* ssci/salt are consumed by object creation only in EPN mode. */
	if (sa->epn_state.epn_enabled) {
		obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
		memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
	}

	obj_attrs.replay_window = ctx->secy->replay_window;
	obj_attrs.replay_protect = ctx->secy->replay_protect;

	err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
	if (err)
		return err;

	if (sa->active) {
		err = mlx5e_macsec_init_sa_fs(ctx, sa, encrypt, is_tx, fs_id);
		if (err)
			goto destroy_macsec_object;
	}

	return 0;

destroy_macsec_object:
	mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);

	return err;
}
402
403 static struct mlx5e_macsec_rx_sc *
mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head * list,sci_t sci)404 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
405 {
406 struct mlx5e_macsec_rx_sc *iter;
407
408 list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
409 if (iter->sci == sci)
410 return iter;
411 }
412
413 return NULL;
414 }
415
/* Transition an RX SA between active and inactive. Activation creates the
 * firmware object and steering rule; deactivation tears both down. On a
 * failed activation the SA is rolled back to inactive.
 *
 * Returns 0 when already in the requested state or on success, otherwise a
 * negative errno from SA initialization.
 */
static int macsec_rx_sa_active_update(struct macsec_context *ctx,
				      struct mlx5e_macsec_sa *rx_sa,
				      bool active, u32 *fs_id)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	int err;

	if (active == rx_sa->active)
		return 0;

	rx_sa->active = active;

	if (active) {
		err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, fs_id);
		if (err)
			rx_sa->active = false;
		return err;
	}

	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev, *fs_id);
	return 0;
}
439
mlx5e_macsec_secy_features_validate(struct macsec_context * ctx)440 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
441 {
442 const struct net_device *netdev = ctx->netdev;
443 const struct macsec_secy *secy = ctx->secy;
444
445 if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
446 netdev_err(netdev,
447 "MACsec offload is supported only when validate_frame is in strict mode\n");
448 return false;
449 }
450
451 if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
452 netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
453 MACSEC_DEFAULT_ICV_LEN);
454 return false;
455 }
456
457 if (!secy->protect_frames) {
458 netdev_err(netdev,
459 "MACsec offload is supported only when protect_frames is set\n");
460 return false;
461 }
462
463 if (!ctx->secy->tx_sc.encrypt) {
464 netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
465 return false;
466 }
467
468 return true;
469 }
470
471 static struct mlx5e_macsec_device *
mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec * macsec,const struct macsec_context * ctx)472 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
473 const struct macsec_context *ctx)
474 {
475 struct mlx5e_macsec_device *iter;
476 const struct list_head *list;
477
478 list = &macsec->macsec_device_list_head;
479 list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
480 if (iter->netdev == ctx->secy->netdev)
481 return iter;
482 }
483
484 return NULL;
485 }
486
/* Record XPN (extended packet number) state on the SA: short SCI, salt,
 * upper PN half, and whether the lower half has crossed the midpoint of
 * its range (the "overlap" flag programmed into hardware).
 */
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
			      const pn_t *next_pn_halves, ssci_t ssci)
{
	struct mlx5e_macsec_epn_state *epn = &sa->epn_state;

	sa->ssci = ssci;
	sa->salt = key->salt;
	epn->epn_enabled = 1;
	epn->epn_msb = next_pn_halves->upper;
	epn->overlap = (next_pn_halves->lower >= MLX5_MACSEC_EPN_SCOPE_MID) ? 1 : 0;
}
498
mlx5e_macsec_add_txsa(struct macsec_context * ctx)499 static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
500 {
501 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
502 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
503 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
504 const struct macsec_secy *secy = ctx->secy;
505 struct mlx5e_macsec_device *macsec_device;
506 struct mlx5_core_dev *mdev = priv->mdev;
507 u8 assoc_num = ctx->sa.assoc_num;
508 struct mlx5e_macsec_sa *tx_sa;
509 struct mlx5e_macsec *macsec;
510 int err = 0;
511
512 mutex_lock(&priv->macsec->lock);
513
514 macsec = priv->macsec;
515 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
516 if (!macsec_device) {
517 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
518 err = -EEXIST;
519 goto out;
520 }
521
522 if (macsec_device->tx_sa[assoc_num]) {
523 netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
524 err = -EEXIST;
525 goto out;
526 }
527
528 tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
529 if (!tx_sa) {
530 err = -ENOMEM;
531 goto out;
532 }
533
534 tx_sa->active = ctx_tx_sa->active;
535 tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
536 tx_sa->sci = secy->sci;
537 tx_sa->assoc_num = assoc_num;
538
539 if (secy->xpn)
540 update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
541 ctx_tx_sa->ssci);
542
543 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
544 MLX5_ACCEL_OBJ_MACSEC_KEY,
545 &tx_sa->enc_key_id);
546 if (err)
547 goto destroy_sa;
548
549 macsec_device->tx_sa[assoc_num] = tx_sa;
550 if (!secy->operational)
551 goto out;
552
553 err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true, NULL);
554 if (err)
555 goto destroy_encryption_key;
556
557 mutex_unlock(&macsec->lock);
558
559 return 0;
560
561 destroy_encryption_key:
562 macsec_device->tx_sa[assoc_num] = NULL;
563 mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
564 destroy_sa:
565 kfree(tx_sa);
566 out:
567 mutex_unlock(&macsec->lock);
568
569 return err;
570 }
571
mlx5e_macsec_upd_txsa(struct macsec_context * ctx)572 static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
573 {
574 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
575 const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
576 const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
577 struct mlx5e_macsec_device *macsec_device;
578 u8 assoc_num = ctx->sa.assoc_num;
579 struct mlx5e_macsec_sa *tx_sa;
580 struct mlx5e_macsec *macsec;
581 struct net_device *netdev;
582 int err = 0;
583
584 mutex_lock(&priv->macsec->lock);
585
586 macsec = priv->macsec;
587 netdev = ctx->netdev;
588 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
589 if (!macsec_device) {
590 netdev_err(netdev, "MACsec offload: Failed to find device context\n");
591 err = -EINVAL;
592 goto out;
593 }
594
595 tx_sa = macsec_device->tx_sa[assoc_num];
596 if (!tx_sa) {
597 netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
598 err = -EEXIST;
599 goto out;
600 }
601
602 if (ctx->sa.update_pn) {
603 netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
604 assoc_num);
605 err = -EINVAL;
606 goto out;
607 }
608
609 if (tx_sa->active == ctx_tx_sa->active)
610 goto out;
611
612 tx_sa->active = ctx_tx_sa->active;
613 if (tx_sa->assoc_num != tx_sc->encoding_sa)
614 goto out;
615
616 if (ctx_tx_sa->active) {
617 err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
618 if (err)
619 goto out;
620 } else {
621 if (!tx_sa->macsec_rule) {
622 err = -EINVAL;
623 goto out;
624 }
625
626 mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
627 }
628 out:
629 mutex_unlock(&macsec->lock);
630
631 return err;
632 }
633
mlx5e_macsec_del_txsa(struct macsec_context * ctx)634 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
635 {
636 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
637 struct mlx5e_macsec_device *macsec_device;
638 u8 assoc_num = ctx->sa.assoc_num;
639 struct mlx5e_macsec_sa *tx_sa;
640 struct mlx5e_macsec *macsec;
641 int err = 0;
642
643 mutex_lock(&priv->macsec->lock);
644 macsec = priv->macsec;
645 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
646 if (!macsec_device) {
647 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
648 err = -EINVAL;
649 goto out;
650 }
651
652 tx_sa = macsec_device->tx_sa[assoc_num];
653 if (!tx_sa) {
654 netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
655 err = -EEXIST;
656 goto out;
657 }
658
659 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
660 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
661 kfree_rcu_mightsleep(tx_sa);
662 macsec_device->tx_sa[assoc_num] = NULL;
663
664 out:
665 mutex_unlock(&macsec->lock);
666
667 return err;
668 }
669
/* MACsec offload callback: add an RX SC.
 *
 * Allocates the SC, assigns it a driver fs_id through the xarray (the id the
 * datapath later resolves back to this SC) and allocates the metadata dst
 * used to deliver the SCI to the stack on receive. All allocations are
 * unwound in reverse order on failure.
 */
static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct list_head *rx_sc_list;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
	if (rx_sc) {
		netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
			   ctx_rx_sc->sci);
		err = -EEXIST;
		goto out;
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc) {
		err = -ENOMEM;
		goto out;
	}

	sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
	if (!sc_xarray_element) {
		err = -ENOMEM;
		goto destroy_rx_sc;
	}

	sc_xarray_element->rx_sc = rx_sc;
	/* fs_ids start at 1; 0 is not a valid id. */
	err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
		       XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
	if (err) {
		if (err == -EBUSY)
			netdev_err(ctx->netdev,
				   "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
				   MLX5_MACEC_RX_FS_ID_MAX);
		goto destroy_sc_xarray_elemenet;
	}

	rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
	if (!rx_sc->md_dst) {
		err = -ENOMEM;
		goto erase_xa_alloc;
	}

	rx_sc->sci = ctx_rx_sc->sci;
	rx_sc->active = ctx_rx_sc->active;
	list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);

	rx_sc->sc_xarray_element = sc_xarray_element;
	rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
	mutex_unlock(&macsec->lock);

	return 0;

erase_xa_alloc:
	xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
destroy_sc_xarray_elemenet:
	kfree(sc_xarray_element);
destroy_rx_sc:
	kfree(rx_sc);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
750
/* MACsec offload callback: update an RX SC's active state.
 *
 * Each existing RX SA is driven to the effective state (SA active && SC
 * active); activating the SC re-creates hardware state for its active SAs,
 * deactivating tears it down.
 */
static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int i;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
	if (!rx_sc) {
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->active == ctx_rx_sc->active)
		goto out;

	rx_sc->active = ctx_rx_sc->active;
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active,
						 &rx_sc->sc_xarray_element->fs_id);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}
800
/* Tear down an RX SC and all of its SAs: remove each SA's rule/object and
 * encryption key, then unlink the SC from the list and xarray and free it.
 * The ordering of the final steps is load-bearing — see the comment below.
 */
static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc,
				struct net_device *netdev)
{
	struct mlx5e_macsec_sa *rx_sa;
	int i;

	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, netdev,
					rx_sc->sc_xarray_element->fs_id);
		mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);

		kfree(rx_sa);
		rx_sc->rx_sa[i] = NULL;
	}

	/* At this point the relevant MACsec offload Rx rule already removed at
	 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
	 * Rx related data propagating using xa_erase which uses rcu to sync,
	 * once fs_id is erased then this rx_sc is hidden from datapath.
	 */
	list_del_rcu(&rx_sc->rx_sc_list_element);
	xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
	metadata_dst_free(rx_sc->md_dst);
	kfree(rx_sc->sc_xarray_element);
	kfree_rcu_mightsleep(rx_sc);
}
831
mlx5e_macsec_del_rxsc(struct macsec_context * ctx)832 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
833 {
834 struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
835 struct mlx5e_macsec_device *macsec_device;
836 struct mlx5e_macsec_rx_sc *rx_sc;
837 struct mlx5e_macsec *macsec;
838 struct list_head *list;
839 int err = 0;
840
841 mutex_lock(&priv->macsec->lock);
842
843 macsec = priv->macsec;
844 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
845 if (!macsec_device) {
846 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
847 err = -EINVAL;
848 goto out;
849 }
850
851 list = &macsec_device->macsec_rx_sc_list_head;
852 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
853 if (!rx_sc) {
854 netdev_err(ctx->netdev,
855 "MACsec offload rx_sc sci %lld doesn't exist\n",
856 ctx->sa.rx_sa->sc->sci);
857 err = -EINVAL;
858 goto out;
859 }
860
861 macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);
862 out:
863 mutex_unlock(&macsec->lock);
864
865 return err;
866 }
867
/* MACsec offload callback: add an RX SA to an existing RX SC.
 *
 * Allocates the SA and its encryption key; hardware object and steering rule
 * are created only when the SA is added active (otherwise they are created
 * later by macsec_rx_sa_active_update()). All state is rolled back on
 * failure.
 */
static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->rx_sa[assoc_num]) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
			   sci, assoc_num);
		err = -EEXIST;
		goto out;
	}

	rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		err = -ENOMEM;
		goto out;
	}

	rx_sa->active = ctx_rx_sa->active;
	rx_sa->next_pn = ctx_rx_sa->next_pn;
	rx_sa->sci = sci;
	rx_sa->assoc_num = assoc_num;

	if (ctx->secy->xpn)
		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
				  ctx_rx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &rx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	rx_sc->rx_sa[assoc_num] = rx_sa;
	/* Inactive SA: defer object/rule creation until it is activated. */
	if (!rx_sa->active)
		goto out;

	//TODO - add support for both authentication and encryption flows
	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false, &rx_sc->sc_xarray_element->fs_id);
	if (err)
		goto destroy_encryption_key;

	goto out;

destroy_encryption_key:
	rx_sc->rx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
destroy_sa:
	kfree(rx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
952
/* MACsec offload callback: update an RX SA.
 *
 * PN updates are rejected; the only supported change is the active flag,
 * delegated to macsec_rx_sa_active_update().
 */
static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (ctx->sa.update_pn) {
		netdev_err(ctx->netdev,
			   "MACsec offload update RX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active,
					 &rx_sc->sc_xarray_element->fs_id);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1010
/* MACsec offload callback: delete an RX SA.
 *
 * Hardware state (rule + object) exists only while the SA is active — an
 * inactive SA either never created it or already tore it down in
 * macsec_rx_sa_active_update() — hence the conditional cleanup.
 */
static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	sci_t sci = ctx->sa.rx_sa->sc->sci;
	struct mlx5e_macsec_rx_sc *rx_sc;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (rx_sa->active)
		mlx5e_macsec_cleanup_sa(macsec, rx_sa, false, ctx->secy->netdev,
					rx_sc->sc_xarray_element->fs_id);
	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
	kfree(rx_sa);
	rx_sc->rx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1064
/* mdo_add_secy: register a net_device for MACsec offload.
 * Validates the SecY features, allocates a per-device context, keeps a
 * private copy of the current MAC address (later compared in upd_secy) and
 * links the context into the driver-wide device list.
 * Serialized by macsec->lock.
 */
static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	const struct net_device *netdev = ctx->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec *macsec;
	int err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
		/* NOTE(review): err stays 0 here, so a duplicate add returns
		 * success without allocating anything - confirm this
		 * idempotent behavior is intended.
		 */
		goto out;
	}

	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
		err = -EBUSY;
		goto out;
	}

	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
	if (!macsec_device) {
		err = -ENOMEM;
		goto out;
	}

	/* Snapshot the MAC address; upd_secy compares against it. */
	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
	if (!macsec_device->dev_addr) {
		kfree(macsec_device);
		err = -ENOMEM;
		goto out;
	}

	macsec_device->netdev = dev;

	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);

	++macsec->num_of_devices;
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1115
/* Remove and re-install every Rx SA flow-steering rule for this device,
 * then refresh the stored MAC address copy from the current dev_addr.
 * Two passes: all rules are torn down first, then rules for still-active SAs
 * are re-created.
 * Called under macsec->lock (from mlx5e_macsec_upd_secy).
 * Returns 0 on success; on failure some rules may already have been removed
 * or re-added (no rollback here).
 */
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;


	/* Pass 1: tear down every installed Rx SA steering rule. */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa_fs(macsec, rx_sa, false, ctx->secy->netdev,
						   rx_sc->sc_xarray_element->fs_id);
		}
	}

	/* Pass 2: re-install rules for the SAs that are still active. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa_fs(ctx, rx_sa, true, false,
							      &rx_sc->sc_xarray_element->fs_id);
				if (err)
					goto out;
			}
		}
	}

	/* Remember the address the rules were (re)built against. */
	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}
1159
/* This function is called from two macsec ops functions:
 * macsec_set_mac_address - the MAC address was changed, therefore we need to
 * destroy and create new Tx contexts (macsec object + steering).
 * macsec_changelink - the Tx SC or SecY may have changed, therefore we need
 * to destroy the Tx and Rx contexts (macsec object + steering).
 */
static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int i, err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	/* if the dev_addr hasn't changed, it means the callback is from macsec_changelink */
	/* NOTE(review): this takes the hw-address rebuild path when the stored
	 * and current dev_addr are EQUAL, while macsec_upd_secy_hw_address()
	 * ends by copying the "new" address over the stored one - confirm the
	 * intended polarity of this memcmp() against the macsec core callers.
	 */
	if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
		err = macsec_upd_secy_hw_address(ctx, macsec_device);
		if (err)
			goto out;
	}

	/* Tear down every Tx SA steering rule... */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa_fs(macsec, tx_sa, true, ctx->secy->netdev, 0);
	}

	/* ...then re-install the rule only for the active encoding SA. */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
			err = mlx5e_macsec_init_sa_fs(ctx, tx_sa, tx_sc->encrypt, true, NULL);
			if (err)
				goto out;
		}
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1221
/* mdo_del_secy: tear down all offload state for a SecY being removed -
 * every Tx SA (steering rule + HW object + encryption key), every Rx SC,
 * the stored MAC address copy - then unlink and free the device context.
 * Serialized by macsec->lock.
 */
static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;
	int i;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;

		goto out;
	}

	/* Release all Tx SAs unconditionally (active or not). */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true, ctx->secy->netdev, 0);
		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
		kfree(tx_sa);
		macsec_device->tx_sa[i] = NULL;
	}

	/* Release every Rx SC (and its SAs) owned by this device. */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
		macsec_del_rxsc_ctx(macsec, rx_sc, ctx->secy->netdev);

	kfree(macsec_device->dev_addr);
	macsec_device->dev_addr = NULL;

	list_del_rcu(&macsec_device->macsec_device_list_element);
	--macsec->num_of_devices;
	kfree(macsec_device);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1270
macsec_build_accel_attrs(struct mlx5e_macsec_sa * sa,struct mlx5_macsec_obj_attrs * attrs)1271 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1272 struct mlx5_macsec_obj_attrs *attrs)
1273 {
1274 attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1275 attrs->epn_state.overlap = sa->epn_state.overlap;
1276 }
1277
/* Fill an ASO WQE control segment pointing at the pre-registered UMR buffer
 * (through which the device DMA-reads/writes the MACsec ASO context).
 * @param is optional: when NULL only the address/key fields are set (plain
 * query); otherwise the condition and bitwise-data fields are packed in.
 * Several sub-fields share a byte, hence the << 4 / << 6 shifts below.
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	struct mlx5e_macsec_umr *umr = macsec_aso->umr;

	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	/* READ_EN flag is ORed into the low word of the DMA address. */
	aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
	aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
	aso_ctrl->l_key = cpu_to_be32(umr->mkey);

	if (!param)
		return;

	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
							param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
1306
/* Modify the EPN fields (epn_msb / epn_overlap) of an existing MACsec HW
 * object.  The object is first queried so modify_field_select can be checked,
 * i.e. that FW permits modifying both EPN fields; returns -EOPNOTSUPP if not.
 * The same "in" buffer is then reused for the modify command.
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN: both fields must be modifiable, otherwise bail out. */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
1349
macsec_aso_build_ctrl(struct mlx5e_macsec_aso * aso,struct mlx5_wqe_aso_ctrl_seg * aso_ctrl,struct mlx5e_macsec_aso_in * in)1350 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1351 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1352 struct mlx5e_macsec_aso_in *in)
1353 {
1354 struct mlx5_aso_ctrl_param param = {};
1355
1356 param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1357 param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1358 param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1359 if (in->mode == MLX5_MACSEC_EPN) {
1360 param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1361 param.bitwise_data = BIT_ULL(54);
1362 param.data_mask = param.bitwise_data;
1363 }
1364 macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, ¶m);
1365 }
1366
/* Post a single ASO WQE that (re-)arms the event for @in->obj_id and poll
 * once for its completion.  Serialized by aso->aso_lock.
 * @mdev is currently unused; kept for interface symmetry with
 * macsec_aso_query().
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_aso_in *in)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);
	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	err = mlx5_aso_poll_cq(maso, false);
	mutex_unlock(&aso->aso_lock);

	return err;
}
1389
/* Read the MACsec ASO context of @in->obj_id into the UMR buffer via an ASO
 * WQE with no param (pure read), then decode the EPN arm bit and mode
 * parameter into @out.  Serialized by aso->aso_lock.
 * Returns 0 on success or the last mlx5_aso_poll_cq() error if no completion
 * arrived within ~10ms.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	unsigned long expires;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	/* NULL param: address/key only - the WQE just fetches the context. */
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	/* Busy-poll (with short sleeps) for the completion, up to ~10ms. */
	expires = jiffies + msecs_to_jiffies(10);
	do {
		err = mlx5_aso_poll_cq(maso, false);
		if (err)
			usleep_range(2, 10);
	} while (err && time_is_after_jiffies(expires));

	if (err)
		goto err_out;

	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}
1429
get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1430 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1431 const u32 obj_id)
1432 {
1433 const struct list_head *device_list;
1434 struct mlx5e_macsec_sa *macsec_sa;
1435 struct mlx5e_macsec_device *iter;
1436 int i;
1437
1438 device_list = &macsec->macsec_device_list_head;
1439
1440 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1441 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1442 macsec_sa = iter->tx_sa[i];
1443 if (!macsec_sa || !macsec_sa->active)
1444 continue;
1445 if (macsec_sa->macsec_obj_id == obj_id)
1446 return macsec_sa;
1447 }
1448 }
1449
1450 return NULL;
1451 }
1452
get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1453 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1454 const u32 obj_id)
1455 {
1456 const struct list_head *device_list, *sc_list;
1457 struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1458 struct mlx5e_macsec_sa *macsec_sa;
1459 struct mlx5e_macsec_device *iter;
1460 int i;
1461
1462 device_list = &macsec->macsec_device_list_head;
1463
1464 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1465 sc_list = &iter->macsec_rx_sc_list_head;
1466 list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1467 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1468 macsec_sa = mlx5e_rx_sc->rx_sa[i];
1469 if (!macsec_sa || !macsec_sa->active)
1470 continue;
1471 if (macsec_sa->macsec_obj_id == obj_id)
1472 return macsec_sa;
1473 }
1474 }
1475 }
1476
1477 return NULL;
1478 }
1479
/* Advance the SW EPN window for @sa after an EPN event, push the new
 * epn_msb/overlap values to the HW object, and re-arm the EPN event.
 * Called under macsec->lock (from macsec_async_event).
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * epn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the epn_overlap to NEW (0), and increment the epn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	macsec_build_accel_attrs(sa, &attrs);
	/* NOTE(review): return value ignored - on failure HW keeps stale EPN
	 * state; consider at least logging the error.
	 */
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}
1509
/* Workqueue handler for a MACsec object-change event: locate the SA that
 * owns @obj_id (Tx list first, then Rx), query its ASO context, and if the
 * EPN arm bit is no longer set, advance the SW EPN window and re-arm.
 * Frees the async_work allocated by macsec_obj_change_event().
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			/* SA may already have been deleted - nothing to do. */
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	/* NOTE(review): query errors are ignored and "out" (zero-initialized)
	 * is consumed anyway - confirm this fail-soft behavior is intended.
	 */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}
1547
macsec_obj_change_event(struct notifier_block * nb,unsigned long event,void * data)1548 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1549 {
1550 struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1551 struct mlx5e_macsec_async_work *async_work;
1552 struct mlx5_eqe_obj_change *obj_change;
1553 struct mlx5_eqe *eqe = data;
1554 u16 obj_type;
1555 u32 obj_id;
1556
1557 if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1558 return NOTIFY_DONE;
1559
1560 obj_change = &eqe->data.obj_change;
1561 obj_type = be16_to_cpu(obj_change->obj_type);
1562 obj_id = be32_to_cpu(obj_change->obj_id);
1563
1564 if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1565 return NOTIFY_DONE;
1566
1567 async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1568 if (!async_work)
1569 return NOTIFY_DONE;
1570
1571 async_work->macsec = macsec;
1572 async_work->mdev = macsec->mdev;
1573 async_work->obj_id = obj_id;
1574
1575 INIT_WORK(&async_work->work, macsec_async_event);
1576
1577 WARN_ON(!queue_work(macsec->wq, &async_work->work));
1578
1579 return NOTIFY_OK;
1580 }
1581
mlx5e_macsec_aso_init(struct mlx5e_macsec_aso * aso,struct mlx5_core_dev * mdev)1582 static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
1583 {
1584 struct mlx5_aso *maso;
1585 int err;
1586
1587 err = mlx5_core_alloc_pd(mdev, &aso->pdn);
1588 if (err) {
1589 mlx5_core_err(mdev,
1590 "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
1591 err);
1592 return err;
1593 }
1594
1595 maso = mlx5_aso_create(mdev, aso->pdn);
1596 if (IS_ERR(maso)) {
1597 err = PTR_ERR(maso);
1598 goto err_aso;
1599 }
1600
1601 err = mlx5e_macsec_aso_reg_mr(mdev, aso);
1602 if (err)
1603 goto err_aso_reg;
1604
1605 mutex_init(&aso->aso_lock);
1606
1607 aso->maso = maso;
1608
1609 return 0;
1610
1611 err_aso_reg:
1612 mlx5_aso_destroy(maso);
1613 err_aso:
1614 mlx5_core_dealloc_pd(mdev, aso->pdn);
1615 return err;
1616 }
1617
/* Release ASO resources in reverse order of mlx5e_macsec_aso_init():
 * UMR MR first, then the ASO queues, then the protection domain.
 */
static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	if (!aso)
		return;

	mlx5e_macsec_aso_dereg_mr(mdev, aso);

	mlx5_aso_destroy(aso->maso);

	mlx5_core_dealloc_pd(mdev, aso->pdn);
}
1629
/* MACsec offload callbacks wired into the net core via netdev->macsec_ops.
 * rx_uses_md_dst: Rx SKBs carry the SCI through a metadata dst (set in
 * mlx5e_macsec_offload_handle_rx_skb).
 */
static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
	.rx_uses_md_dst = true,
};
1645
mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec * macsec,struct sk_buff * skb)1646 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1647 {
1648 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1649 u32 fs_id;
1650
1651 fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1652 &md_dst->u.macsec_info.sci);
1653 if (!fs_id)
1654 goto err_out;
1655
1656 return true;
1657
1658 err_out:
1659 dev_kfree_skb_any(skb);
1660 return false;
1661 }
1662
mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec * macsec,struct sk_buff * skb,struct mlx5_wqe_eth_seg * eseg)1663 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1664 struct sk_buff *skb,
1665 struct mlx5_wqe_eth_seg *eseg)
1666 {
1667 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1668 u32 fs_id;
1669
1670 fs_id = mlx5_macsec_fs_get_fs_id_from_hashtable(macsec->mdev->macsec_fs,
1671 &md_dst->u.macsec_info.sci);
1672 if (!fs_id)
1673 return;
1674
1675 eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
1676 }
1677
mlx5e_macsec_offload_handle_rx_skb(struct net_device * netdev,struct sk_buff * skb,struct mlx5_cqe64 * cqe)1678 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1679 struct sk_buff *skb,
1680 struct mlx5_cqe64 *cqe)
1681 {
1682 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1683 u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1684 struct mlx5e_priv *priv = macsec_netdev_priv(netdev);
1685 struct mlx5e_macsec_rx_sc *rx_sc;
1686 struct mlx5e_macsec *macsec;
1687 u32 fs_id;
1688
1689 macsec = priv->macsec;
1690 if (!macsec)
1691 return;
1692
1693 fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1694
1695 rcu_read_lock();
1696 sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1697 rx_sc = sc_xarray_element->rx_sc;
1698 if (rx_sc) {
1699 dst_hold(&rx_sc->md_dst->dst);
1700 skb_dst_set(skb, &rx_sc->md_dst->dst);
1701 }
1702
1703 rcu_read_unlock();
1704 }
1705
/* Advertise MACsec offload on the netdevice when the mlx5 device supports
 * it: install the offload ops, set NETIF_F_HW_MACSEC, and keep skb dsts
 * (the Rx path relies on a metadata dst to deliver the SCI).
 */
void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;

	if (!mlx5e_is_macsec_device(priv->mdev))
		return;

	/* Enable MACsec */
	mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
	netdev->macsec_ops = &macsec_offload_ops;
	netdev->features |= NETIF_F_HW_MACSEC;
	netif_keep_dst(netdev);
}
1719
/* Attach MACsec offload to the netdevice: allocate the driver context, init
 * the ASO channel, the ordered event workqueue, the fs_id xarray (ALLOC1, so
 * allocated ids start at 1 - the datapath treats fs_id 0 as "no id"), the
 * steering core, and register for object-change events.
 * Returns 0 (including when the device lacks MACsec capability) or -errno,
 * in which case all partially-initialized state is released.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	/* Ordered: object-change events for one device must not race. */
	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5_macsec_fs_init(mdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	mdev->macsec_fs = macsec_fs;

	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	kfree(macsec);
	priv->macsec = NULL;
	return err;
}
1781
/* Detach MACsec offload - reverse of mlx5e_macsec_init(): unregister the
 * event notifier, tear down the steering core, destroy the workqueue
 * (flushing pending async events), release the ASO channel and free the
 * context.  No-op if offload was never initialized.
 */
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!macsec)
		return;

	mlx5_notifier_unregister(mdev, &macsec->nb);
	mlx5_macsec_fs_cleanup(mdev->macsec_fs);
	destroy_workqueue(macsec->wq);
	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
	mutex_destroy(&macsec->lock);
	kfree(macsec);
}
1797