1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include <linux/mlx5/device.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/xarray.h>
7
8 #include "en.h"
9 #include "lib/aso.h"
10 #include "lib/mlx5.h"
11 #include "en_accel/macsec.h"
12 #include "en_accel/macsec_fs.h"
13
/* Midpoint of the 32-bit lower-PN space; once the lower half of an XPN
 * crosses this value the EPN window overlaps the next epn_msb increment
 * (see update_macsec_epn()).
 */
#define MLX5_MACSEC_EPN_SCOPE_MID 0x80000000L
#define MLX5E_MACSEC_ASO_CTX_SZ MLX5_ST_SZ_BYTES(macsec_aso)

/* Bitmask of ASO events the driver can arm; only EPN rollover today. */
enum mlx5_macsec_aso_event_arm {
	MLX5E_ASO_EPN_ARM = BIT(0),
};

enum {
	MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
};
24
/* Reference to a MACsec offload object, presumably carried into deferred
 * ASO handling — TODO confirm against the (not shown) event path.
 */
struct mlx5e_macsec_handle {
	struct mlx5e_macsec *macsec;
	u32 obj_id;	/* firmware MACsec object ID */
	u8 idx;
};

/* ASO operation mode selector (mlx5e_macsec_aso_in::mode). */
enum {
	MLX5_MACSEC_EPN,
};

/* Result of an ASO query on a MACsec object. */
struct mlx5e_macsec_aso_out {
	u8 event_arm;	/* armed event bits, see enum mlx5_macsec_aso_event_arm */
	u32 mode_param;
};

/* Parameters of an ASO operation on a MACsec object. */
struct mlx5e_macsec_aso_in {
	u8 mode;	/* e.g. MLX5_MACSEC_EPN */
	u32 obj_id;
};

/* Extended packet numbering (XPN) state mirrored into the HW object. */
struct mlx5e_macsec_epn_state {
	u32 epn_msb;	/* upper 32 bits of the next PN */
	u8 epn_enabled;
	u8 overlap;	/* lower PN has crossed MLX5_MACSEC_EPN_SCOPE_MID */
};

/* Work item deferring ASO event handling to process context. */
struct mlx5e_macsec_async_work {
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	struct work_struct work;
	u32 obj_id;	/* object the event refers to */
};
57
/* Driver-side state of a single TX or RX security association. */
struct mlx5e_macsec_sa {
	bool active;
	u8 assoc_num;
	u32 macsec_obj_id;	/* firmware MACsec object backing this SA */
	u32 enc_key_id;		/* HW encryption key (DEK) handle */
	u32 next_pn;		/* lower 32 bits of the next packet number */
	sci_t sci;
	ssci_t ssci;		/* short SCI, used only when XPN is enabled */
	salt_t salt;		/* XPN salt */

	struct rhash_head hash;	/* node in mlx5e_macsec::sci_hash (TX only) */
	u32 fs_id;		/* flow-steering ID; doubles as the sci_hash value */
	union mlx5e_macsec_rule *macsec_rule;	/* installed steering rule, NULL if none */
	struct rcu_head rcu_head;	/* for kfree_rcu of TX SAs */
	struct mlx5e_macsec_epn_state epn_state;
};

struct mlx5e_macsec_rx_sc;
/* Indirection stored in mlx5e_macsec::sc_xarray: maps an allocated
 * fs_id to its RX secure channel.
 */
struct mlx5e_macsec_rx_sc_xarray_element {
	u32 fs_id;	/* key allocated by xa_alloc() */
	struct mlx5e_macsec_rx_sc *rx_sc;
};

/* Driver-side state of an RX secure channel and its SAs. */
struct mlx5e_macsec_rx_sc {
	bool active;
	sci_t sci;
	struct mlx5e_macsec_sa *rx_sa[MACSEC_NUM_AN];
	struct list_head rx_sc_list_element;	/* node in per-device rx_sc list (RCU) */
	struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
	struct metadata_dst *md_dst;	/* carries the SCI to the stack on RX */
	struct rcu_head rcu_head;	/* for kfree_rcu */
};

/* DMA-able buffer + mkey used as the ASO UMR target. */
struct mlx5e_macsec_umr {
	u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)];
	dma_addr_t dma_addr;	/* mapping of ctx[] */
	u32 mkey;
};
96
/* Per-function MACsec ASO context. */
struct mlx5e_macsec_aso {
	/* ASO */
	struct mlx5_aso *maso;
	/* Protects macsec ASO */
	struct mutex aso_lock;
	/* UMR */
	struct mlx5e_macsec_umr *umr;

	u32 pdn;	/* protection domain for the UMR mkey */
};

/* TX SCI -> SA lookup table parameters: keyed by mlx5e_macsec_sa::sci,
 * chained through mlx5e_macsec_sa::hash.
 */
static const struct rhashtable_params rhash_sci = {
	.key_len = sizeof_field(struct mlx5e_macsec_sa, sci),
	.key_offset = offsetof(struct mlx5e_macsec_sa, sci),
	.head_offset = offsetof(struct mlx5e_macsec_sa, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
115
/* State tracked for one offloaded MACsec net_device. */
struct mlx5e_macsec_device {
	const struct net_device *netdev;	/* the macsec virtual device */
	struct mlx5e_macsec_sa *tx_sa[MACSEC_NUM_AN];
	struct list_head macsec_rx_sc_list_head;	/* RX SCs, RCU-protected list */
	unsigned char *dev_addr;	/* copy of the device MAC address */
	struct list_head macsec_device_list_element;
};

/* Top-level MACsec offload state, one per mlx5e priv. */
struct mlx5e_macsec {
	struct list_head macsec_device_list_head;
	int num_of_devices;
	struct mlx5e_macsec_fs *macsec_fs;
	struct mutex lock; /* Protects mlx5e_macsec internal contexts */

	/* Tx sci -> fs id mapping handling */
	struct rhashtable sci_hash; /* sci -> mlx5e_macsec_sa */

	/* Rx fs_id -> rx_sc mapping */
	struct xarray sc_xarray;

	struct mlx5_core_dev *mdev;

	/* Stats manage */
	struct mlx5e_macsec_stats stats;

	/* ASO */
	struct mlx5e_macsec_aso aso;

	struct notifier_block nb;
	struct workqueue_struct *wq;
};
147
/* Attributes written into the firmware MACsec object at creation time
 * (consumed by mlx5e_macsec_create_object()).
 */
struct mlx5_macsec_obj_attrs {
	u32 aso_pdn;		/* PD for ASO access */
	u32 next_pn;		/* initial mode_parameter of the ASO */
	__be64 sci;
	u32 enc_key_id;		/* DEK handle */
	bool encrypt;		/* confidentiality on/off */
	struct mlx5e_macsec_epn_state epn_state;
	salt_t salt;
	__be32 ssci;
	bool replay_protect;	/* RX only: enable replay protection */
	u32 replay_window;	/* RX only: window size in packets */
};

/* Raw ASO control segment parameters — field-for-field mirror of the
 * device ASO ctrl layout.
 */
struct mlx5_aso_ctrl_param {
	u8 data_mask_mode;
	u8 condition_0_operand;
	u8 condition_1_operand;
	u8 condition_0_offset;
	u8 condition_1_offset;
	u8 data_offset;
	u8 condition_operand;
	u32 condition_0_data;
	u32 condition_0_mask;
	u32 condition_1_data;
	u32 condition_1_mask;
	u64 bitwise_data;
	u64 data_mask;
};
176
mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)177 static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
178 {
179 struct mlx5e_macsec_umr *umr;
180 struct device *dma_device;
181 dma_addr_t dma_addr;
182 int err;
183
184 umr = kzalloc(sizeof(*umr), GFP_KERNEL);
185 if (!umr) {
186 err = -ENOMEM;
187 return err;
188 }
189
190 dma_device = &mdev->pdev->dev;
191 dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
192 err = dma_mapping_error(dma_device, dma_addr);
193 if (err) {
194 mlx5_core_err(mdev, "Can't map dma device, err=%d\n", err);
195 goto out_dma;
196 }
197
198 err = mlx5e_create_mkey(mdev, aso->pdn, &umr->mkey);
199 if (err) {
200 mlx5_core_err(mdev, "Can't create mkey, err=%d\n", err);
201 goto out_mkey;
202 }
203
204 umr->dma_addr = dma_addr;
205
206 aso->umr = umr;
207
208 return 0;
209
210 out_mkey:
211 dma_unmap_single(dma_device, dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
212 out_dma:
213 kfree(umr);
214 return err;
215 }
216
mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev * mdev,struct mlx5e_macsec_aso * aso)217 static void mlx5e_macsec_aso_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macsec_aso *aso)
218 {
219 struct mlx5e_macsec_umr *umr = aso->umr;
220
221 mlx5_core_destroy_mkey(mdev, umr->mkey);
222 dma_unmap_single(&mdev->pdev->dev, umr->dma_addr, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
223 kfree(umr);
224 }
225
macsec_set_replay_protection(struct mlx5_macsec_obj_attrs * attrs,void * aso_ctx)226 static int macsec_set_replay_protection(struct mlx5_macsec_obj_attrs *attrs, void *aso_ctx)
227 {
228 u8 window_sz;
229
230 if (!attrs->replay_protect)
231 return 0;
232
233 switch (attrs->replay_window) {
234 case 256:
235 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_256BIT;
236 break;
237 case 128:
238 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_128BIT;
239 break;
240 case 64:
241 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_64BIT;
242 break;
243 case 32:
244 window_sz = MLX5_MACSEC_ASO_REPLAY_WIN_32BIT;
245 break;
246 default:
247 return -EINVAL;
248 }
249 MLX5_SET(macsec_aso, aso_ctx, window_size, window_sz);
250 MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_REPLAY_PROTECTION);
251
252 return 0;
253 }
254
/* Create a firmware MACsec offload object from @attrs.
 * @is_tx selects the ASO mode: TX objects increment the SN, RX objects
 * optionally do replay protection.
 * On success the new object ID is returned through @macsec_obj_id.
 */
static int mlx5e_macsec_create_object(struct mlx5_core_dev *mdev,
				      struct mlx5_macsec_obj_attrs *attrs,
				      bool is_tx,
				      u32 *macsec_obj_id)
{
	u32 in[MLX5_ST_SZ_DW(create_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	void *aso_ctx;
	void *obj;
	int err;

	obj = MLX5_ADDR_OF(create_macsec_obj_in, in, macsec_object);
	aso_ctx = MLX5_ADDR_OF(macsec_offload_obj, obj, macsec_aso);

	MLX5_SET(macsec_offload_obj, obj, confidentiality_en, attrs->encrypt);
	MLX5_SET(macsec_offload_obj, obj, dekn, attrs->enc_key_id);
	MLX5_SET(macsec_offload_obj, obj, aso_return_reg, MLX5_MACSEC_ASO_REG_C_4_5);
	MLX5_SET(macsec_offload_obj, obj, macsec_aso_access_pd, attrs->aso_pdn);
	MLX5_SET(macsec_aso, aso_ctx, mode_parameter, attrs->next_pn);

	/* Epn */
	if (attrs->epn_state.epn_enabled) {
		void *salt_p;
		int i;

		MLX5_SET(macsec_aso, aso_ctx, epn_event_arm, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_en, 1);
		MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
		MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);
		/* With XPN the object's sci field carries the SSCI instead */
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)attrs->ssci);
		salt_p = MLX5_ADDR_OF(macsec_offload_obj, obj, salt);
		/* Copy the three 32-bit salt words in reversed word order */
		for (i = 0; i < 3 ; i++)
			memcpy((u32 *)salt_p + i, &attrs->salt.bytes[4 * (2 - i)], 4);
	} else {
		MLX5_SET64(macsec_offload_obj, obj, sci, (__force u64)(attrs->sci));
	}

	MLX5_SET(macsec_aso, aso_ctx, valid, 0x1);
	if (is_tx) {
		MLX5_SET(macsec_aso, aso_ctx, mode, MLX5_MACSEC_ASO_INC_SN);
	} else {
		/* RX: program replay protection if the secy asked for it */
		err = macsec_set_replay_protection(attrs, aso_ctx);
		if (err)
			return err;
	}

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to create MACsec object (err = %d)\n",
			      err);
		return err;
	}

	*macsec_obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}
317
/* Destroy a firmware MACsec object by ID. Failures are ignored since
 * there is nothing the caller could do about them on teardown.
 */
static void mlx5e_macsec_destroy_object(struct mlx5_core_dev *mdev, u32 macsec_obj_id)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};

	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
329
/* Undo mlx5e_macsec_init_sa(): unpublish a TX SA from the SCI hash,
 * remove the steering rule and destroy the firmware object.
 * Safe to call on an SA that has no rule installed (no-op then).
 */
static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_sa *sa,
				    bool is_tx)
{
	int action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
			       MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	/* Unpublish before teardown so datapath lookups can no longer
	 * return this SA.
	 */
	if ((is_tx) && sa->fs_id) {
		/* Make sure ongoing datapath readers sees a valid SA */
		rhashtable_remove_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
		sa->fs_id = 0;
	}

	if (!sa->macsec_rule)
		return;

	mlx5e_macsec_fs_del_rule(macsec->macsec_fs, sa->macsec_rule, action);
	mlx5e_macsec_destroy_object(macsec->mdev, sa->macsec_obj_id);
	/* Mark the SA as torn down; init_sa error paths rely on this */
	sa->macsec_rule = NULL;
}
350
/* Bring an SA online in HW: create the MACsec object, install the
 * steering rule and, for TX, publish the SA in the SCI hash table.
 * Returns 0 on success or a negative errno with everything rolled back.
 */
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
				struct mlx5e_macsec_sa *sa,
				bool encrypt,
				bool is_tx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_macsec_rule_attrs rule_attrs;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_macsec_obj_attrs obj_attrs;
	union mlx5e_macsec_rule *macsec_rule;
	int err;

	obj_attrs.next_pn = sa->next_pn;
	obj_attrs.sci = cpu_to_be64((__force u64)sa->sci);
	obj_attrs.enc_key_id = sa->enc_key_id;
	obj_attrs.encrypt = encrypt;
	obj_attrs.aso_pdn = macsec->aso.pdn;
	obj_attrs.epn_state = sa->epn_state;

	if (sa->epn_state.epn_enabled) {
		obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
		memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
	}

	obj_attrs.replay_window = ctx->secy->replay_window;
	obj_attrs.replay_protect = ctx->secy->replay_protect;

	err = mlx5e_macsec_create_object(mdev, &obj_attrs, is_tx, &sa->macsec_obj_id);
	if (err)
		return err;

	rule_attrs.macsec_obj_id = sa->macsec_obj_id;
	rule_attrs.sci = sa->sci;
	rule_attrs.assoc_num = sa->assoc_num;
	rule_attrs.action = (is_tx) ? MLX5_ACCEL_MACSEC_ACTION_ENCRYPT :
				      MLX5_ACCEL_MACSEC_ACTION_DECRYPT;

	macsec_rule = mlx5e_macsec_fs_add_rule(macsec->macsec_fs, ctx, &rule_attrs, &sa->fs_id);
	if (!macsec_rule) {
		err = -ENOMEM;
		goto destroy_macsec_object;
	}

	sa->macsec_rule = macsec_rule;

	if (is_tx) {
		err = rhashtable_insert_fast(&macsec->sci_hash, &sa->hash, rhash_sci);
		if (err)
			goto destroy_macsec_object_and_rule;
	}

	return 0;

destroy_macsec_object_and_rule:
	/* cleanup_sa removes the rule AND destroys the object; returning
	 * here avoids falling through and destroying the object twice.
	 */
	mlx5e_macsec_cleanup_sa(macsec, sa, is_tx);
	return err;

destroy_macsec_object:
	mlx5e_macsec_destroy_object(mdev, sa->macsec_obj_id);

	return err;
}
412
413 static struct mlx5e_macsec_rx_sc *
mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head * list,sci_t sci)414 mlx5e_macsec_get_rx_sc_from_sc_list(const struct list_head *list, sci_t sci)
415 {
416 struct mlx5e_macsec_rx_sc *iter;
417
418 list_for_each_entry_rcu(iter, list, rx_sc_list_element) {
419 if (iter->sci == sci)
420 return iter;
421 }
422
423 return NULL;
424 }
425
/* Transition an RX SA between active and inactive, installing or
 * removing its HW state accordingly. No-op when the state is unchanged.
 * On a failed activation the SA is left marked inactive.
 */
static int macsec_rx_sa_active_update(struct macsec_context *ctx,
				      struct mlx5e_macsec_sa *rx_sa,
				      bool active)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec *macsec = priv->macsec;
	int err;

	if (rx_sa->active == active)
		return 0;

	rx_sa->active = active;

	if (active) {
		err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
		if (err)
			rx_sa->active = false;
		return err;
	}

	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
	return 0;
}
449
mlx5e_macsec_secy_features_validate(struct macsec_context * ctx)450 static bool mlx5e_macsec_secy_features_validate(struct macsec_context *ctx)
451 {
452 const struct net_device *netdev = ctx->netdev;
453 const struct macsec_secy *secy = ctx->secy;
454
455 if (secy->validate_frames != MACSEC_VALIDATE_STRICT) {
456 netdev_err(netdev,
457 "MACsec offload is supported only when validate_frame is in strict mode\n");
458 return false;
459 }
460
461 if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN) {
462 netdev_err(netdev, "MACsec offload is supported only when icv_len is %d\n",
463 MACSEC_DEFAULT_ICV_LEN);
464 return false;
465 }
466
467 if (!secy->protect_frames) {
468 netdev_err(netdev,
469 "MACsec offload is supported only when protect_frames is set\n");
470 return false;
471 }
472
473 if (!ctx->secy->tx_sc.encrypt) {
474 netdev_err(netdev, "MACsec offload: encrypt off isn't supported\n");
475 return false;
476 }
477
478 return true;
479 }
480
481 static struct mlx5e_macsec_device *
mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec * macsec,const struct macsec_context * ctx)482 mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
483 const struct macsec_context *ctx)
484 {
485 struct mlx5e_macsec_device *iter;
486 const struct list_head *list;
487
488 list = &macsec->macsec_device_list_head;
489 list_for_each_entry_rcu(iter, list, macsec_device_list_element) {
490 if (iter->netdev == ctx->secy->netdev)
491 return iter;
492 }
493
494 return NULL;
495 }
496
/* Record XPN parameters on the SA: SSCI, salt and the EPN window state
 * derived from the 64-bit next PN.
 */
static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
			      const pn_t *next_pn_halves, ssci_t ssci)
{
	struct mlx5e_macsec_epn_state *epn = &sa->epn_state;

	sa->ssci = ssci;
	sa->salt = key->salt;
	epn->epn_enabled = 1;
	epn->epn_msb = next_pn_halves->upper;
	/* Window overlaps the next epn_msb once the lower half passes the
	 * midpoint of its range.
	 */
	epn->overlap = (next_pn_halves->lower >= MLX5_MACSEC_EPN_SCOPE_MID) ? 1 : 0;
}
508
/* macsec offload op: add a TX SA.
 * Allocates the driver-side SA and its HW encryption key; only when the
 * secy is operational and this SA is the active encoding SA is the HW
 * object + TX steering rule actually installed.
 */
static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
{
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct macsec_secy *secy = ctx->secy;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		/* NOTE(review): sibling ops return -EINVAL for a missing
		 * device context; -EEXIST here looks inconsistent — confirm.
		 */
		err = -EEXIST;
		goto out;
	}

	if (macsec_device->tx_sa[assoc_num]) {
		netdev_err(ctx->netdev, "MACsec offload tx_sa: %d already exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	tx_sa = kzalloc(sizeof(*tx_sa), GFP_KERNEL);
	if (!tx_sa) {
		err = -ENOMEM;
		goto out;
	}

	tx_sa->active = ctx_tx_sa->active;
	tx_sa->next_pn = ctx_tx_sa->next_pn_halves.lower;
	tx_sa->sci = secy->sci;
	tx_sa->assoc_num = assoc_num;

	if (secy->xpn)
		update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
				  ctx_tx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &tx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	macsec_device->tx_sa[assoc_num] = tx_sa;
	/* Defer HW programming unless this SA will actually transmit now */
	if (!secy->operational ||
	    assoc_num != tx_sc->encoding_sa ||
	    !tx_sa->active)
		goto out;

	err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
	if (err)
		goto destroy_encryption_key;

	mutex_unlock(&macsec->lock);

	return 0;

destroy_encryption_key:
	macsec_device->tx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, tx_sa->enc_key_id);
destroy_sa:
	kfree(tx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
583
/* macsec offload op: update a TX SA.
 * Only the active flag may change; updating the PN of an offloaded SA is
 * rejected. Toggling active installs/removes the HW state when the SA is
 * the current encoding SA.
 */
static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
{
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	const struct macsec_tx_sa *ctx_tx_sa = ctx->sa.tx_sa;
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct net_device *netdev;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	netdev = ctx->netdev;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	tx_sa = macsec_device->tx_sa[assoc_num];
	if (!tx_sa) {
		netdev_err(netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
		err = -EEXIST;
		goto out;
	}

	/* HW owns the PN once offloaded; a PN rewrite can't be honoured */
	if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
		netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (tx_sa->active == ctx_tx_sa->active)
		goto out;

	tx_sa->active = ctx_tx_sa->active;
	/* Non-encoding SAs carry no HW state; nothing more to do */
	if (tx_sa->assoc_num != tx_sc->encoding_sa)
		goto out;

	if (ctx_tx_sa->active) {
		err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
		if (err)
			goto out;
	} else {
		if (!tx_sa->macsec_rule) {
			err = -EINVAL;
			goto out;
		}

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
	}
out:
	mutex_unlock(&macsec->lock);

	return err;
}
645
mlx5e_macsec_del_txsa(struct macsec_context * ctx)646 static int mlx5e_macsec_del_txsa(struct macsec_context *ctx)
647 {
648 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
649 struct mlx5e_macsec_device *macsec_device;
650 u8 assoc_num = ctx->sa.assoc_num;
651 struct mlx5e_macsec_sa *tx_sa;
652 struct mlx5e_macsec *macsec;
653 int err = 0;
654
655 mutex_lock(&priv->macsec->lock);
656 macsec = priv->macsec;
657 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
658 if (!macsec_device) {
659 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
660 err = -EINVAL;
661 goto out;
662 }
663
664 tx_sa = macsec_device->tx_sa[assoc_num];
665 if (!tx_sa) {
666 netdev_err(ctx->netdev, "MACsec offload: TX sa 0x%x doesn't exist\n", assoc_num);
667 err = -EEXIST;
668 goto out;
669 }
670
671 mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
672 mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
673 kfree_rcu(tx_sa);
674 macsec_device->tx_sa[assoc_num] = NULL;
675
676 out:
677 mutex_unlock(&macsec->lock);
678
679 return err;
680 }
681
mlx5e_macsec_get_sa_from_hashtable(struct rhashtable * sci_hash,sci_t * sci)682 static u32 mlx5e_macsec_get_sa_from_hashtable(struct rhashtable *sci_hash, sci_t *sci)
683 {
684 struct mlx5e_macsec_sa *macsec_sa;
685 u32 fs_id = 0;
686
687 rcu_read_lock();
688 macsec_sa = rhashtable_lookup(sci_hash, sci, rhash_sci);
689 if (macsec_sa)
690 fs_id = macsec_sa->fs_id;
691 rcu_read_unlock();
692
693 return fs_id;
694 }
695
mlx5e_macsec_add_rxsc(struct macsec_context * ctx)696 static int mlx5e_macsec_add_rxsc(struct macsec_context *ctx)
697 {
698 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
699 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
700 const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
701 struct mlx5e_macsec_device *macsec_device;
702 struct mlx5e_macsec_rx_sc *rx_sc;
703 struct list_head *rx_sc_list;
704 struct mlx5e_macsec *macsec;
705 int err = 0;
706
707 mutex_lock(&priv->macsec->lock);
708 macsec = priv->macsec;
709 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
710 if (!macsec_device) {
711 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
712 err = -EINVAL;
713 goto out;
714 }
715
716 rx_sc_list = &macsec_device->macsec_rx_sc_list_head;
717 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(rx_sc_list, ctx_rx_sc->sci);
718 if (rx_sc) {
719 netdev_err(ctx->netdev, "MACsec offload: rx_sc (sci %lld) already exists\n",
720 ctx_rx_sc->sci);
721 err = -EEXIST;
722 goto out;
723 }
724
725 rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
726 if (!rx_sc) {
727 err = -ENOMEM;
728 goto out;
729 }
730
731 sc_xarray_element = kzalloc(sizeof(*sc_xarray_element), GFP_KERNEL);
732 if (!sc_xarray_element) {
733 err = -ENOMEM;
734 goto destroy_rx_sc;
735 }
736
737 sc_xarray_element->rx_sc = rx_sc;
738 err = xa_alloc(&macsec->sc_xarray, &sc_xarray_element->fs_id, sc_xarray_element,
739 XA_LIMIT(1, MLX5_MACEC_RX_FS_ID_MAX), GFP_KERNEL);
740 if (err) {
741 if (err == -EBUSY)
742 netdev_err(ctx->netdev,
743 "MACsec offload: unable to create entry for RX SC (%d Rx SCs already allocated)\n",
744 MLX5_MACEC_RX_FS_ID_MAX);
745 goto destroy_sc_xarray_elemenet;
746 }
747
748 rx_sc->md_dst = metadata_dst_alloc(0, METADATA_MACSEC, GFP_KERNEL);
749 if (!rx_sc->md_dst) {
750 err = -ENOMEM;
751 goto erase_xa_alloc;
752 }
753
754 rx_sc->sci = ctx_rx_sc->sci;
755 rx_sc->active = ctx_rx_sc->active;
756 list_add_rcu(&rx_sc->rx_sc_list_element, rx_sc_list);
757
758 rx_sc->sc_xarray_element = sc_xarray_element;
759 rx_sc->md_dst->u.macsec_info.sci = rx_sc->sci;
760 mutex_unlock(&macsec->lock);
761
762 return 0;
763
764 erase_xa_alloc:
765 xa_erase(&macsec->sc_xarray, sc_xarray_element->fs_id);
766 destroy_sc_xarray_elemenet:
767 kfree(sc_xarray_element);
768 destroy_rx_sc:
769 kfree(rx_sc);
770
771 out:
772 mutex_unlock(&macsec->lock);
773
774 return err;
775 }
776
/* macsec offload op: update an RX secure channel.
 * Only the active flag can change; the new state is propagated to every
 * existing SA of the channel (an SA is effectively active only when both
 * it and its SC are active).
 */
static int mlx5e_macsec_upd_rxsc(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct macsec_rx_sc *ctx_rx_sc = ctx->rx_sc;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int i;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx_rx_sc->sci);
	if (!rx_sc) {
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->active == ctx_rx_sc->active)
		goto out;

	rx_sc->active = ctx_rx_sc->active;
	/* NOTE(review): an error mid-loop leaves earlier SAs already
	 * switched — partial update is reported to the caller via err.
	 */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		rx_sa = rx_sc->rx_sa[i];
		if (!rx_sa)
			continue;

		err = macsec_rx_sa_active_update(ctx, rx_sa, rx_sa->active && ctx_rx_sc->active);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}
825
macsec_del_rxsc_ctx(struct mlx5e_macsec * macsec,struct mlx5e_macsec_rx_sc * rx_sc)826 static void macsec_del_rxsc_ctx(struct mlx5e_macsec *macsec, struct mlx5e_macsec_rx_sc *rx_sc)
827 {
828 struct mlx5e_macsec_sa *rx_sa;
829 int i;
830
831 for (i = 0; i < MACSEC_NUM_AN; ++i) {
832 rx_sa = rx_sc->rx_sa[i];
833 if (!rx_sa)
834 continue;
835
836 mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
837 mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
838
839 kfree(rx_sa);
840 rx_sc->rx_sa[i] = NULL;
841 }
842
843 /* At this point the relevant MACsec offload Rx rule already removed at
844 * mlx5e_macsec_cleanup_sa need to wait for datapath to finish current
845 * Rx related data propagating using xa_erase which uses rcu to sync,
846 * once fs_id is erased then this rx_sc is hidden from datapath.
847 */
848 list_del_rcu(&rx_sc->rx_sc_list_element);
849 xa_erase(&macsec->sc_xarray, rx_sc->sc_xarray_element->fs_id);
850 metadata_dst_free(rx_sc->md_dst);
851 kfree(rx_sc->sc_xarray_element);
852 kfree_rcu(rx_sc);
853 }
854
mlx5e_macsec_del_rxsc(struct macsec_context * ctx)855 static int mlx5e_macsec_del_rxsc(struct macsec_context *ctx)
856 {
857 struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
858 struct mlx5e_macsec_device *macsec_device;
859 struct mlx5e_macsec_rx_sc *rx_sc;
860 struct mlx5e_macsec *macsec;
861 struct list_head *list;
862 int err = 0;
863
864 mutex_lock(&priv->macsec->lock);
865
866 macsec = priv->macsec;
867 macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
868 if (!macsec_device) {
869 netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
870 err = -EINVAL;
871 goto out;
872 }
873
874 list = &macsec_device->macsec_rx_sc_list_head;
875 rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, ctx->rx_sc->sci);
876 if (!rx_sc) {
877 netdev_err(ctx->netdev,
878 "MACsec offload rx_sc sci %lld doesn't exist\n",
879 ctx->sa.rx_sa->sc->sci);
880 err = -EINVAL;
881 goto out;
882 }
883
884 macsec_del_rxsc_ctx(macsec, rx_sc);
885 out:
886 mutex_unlock(&macsec->lock);
887
888 return err;
889 }
890
/* macsec offload op: add an RX SA to an existing RX secure channel.
 * Allocates the SA and its HW key; the HW object + steering rule are
 * only installed when the SA is created active.
 */
static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	if (rx_sc->rx_sa[assoc_num]) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d already exist\n",
			   sci, assoc_num);
		err = -EEXIST;
		goto out;
	}

	rx_sa = kzalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		err = -ENOMEM;
		goto out;
	}

	rx_sa->active = ctx_rx_sa->active;
	rx_sa->next_pn = ctx_rx_sa->next_pn;
	rx_sa->sci = sci;
	rx_sa->assoc_num = assoc_num;
	/* Share the SC's fs_id so the datapath can map packets to this SC */
	rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;

	if (ctx->secy->xpn)
		update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
				  ctx_rx_sa->ssci);

	err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
					 MLX5_ACCEL_OBJ_MACSEC_KEY,
					 &rx_sa->enc_key_id);
	if (err)
		goto destroy_sa;

	rx_sc->rx_sa[assoc_num] = rx_sa;
	if (!rx_sa->active)
		goto out;

	//TODO - add support for both authentication and encryption flows
	err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
	if (err)
		goto destroy_encryption_key;

	goto out;

destroy_encryption_key:
	rx_sc->rx_sa[assoc_num] = NULL;
	mlx5_destroy_encryption_key(mdev, rx_sa->enc_key_id);
destroy_sa:
	kfree(rx_sa);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
976
/* macsec offload op: update an RX SA.
 * Only the active flag can change; updating the PN of an offloaded SA is
 * rejected since HW owns the replay state.
 */
static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
{
	const struct macsec_rx_sa *ctx_rx_sa = ctx->sa.rx_sa;
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_rx_sc *rx_sc;
	sci_t sci = ctx_rx_sa->sc->sci;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
		netdev_err(ctx->netdev,
			   "MACsec offload update RX sa %d PN isn't supported\n",
			   assoc_num);
		err = -EINVAL;
		goto out;
	}

	err = macsec_rx_sa_active_update(ctx, rx_sa, ctx_rx_sa->active);
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1033
/* macsec offload op: delete an RX SA.
 * Removes HW state and the encryption key, then frees the SA and clears
 * its slot in the SC. Serialized against other ops by macsec->lock.
 */
static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	sci_t sci = ctx->sa.rx_sa->sc->sci;
	struct mlx5e_macsec_rx_sc *rx_sc;
	u8 assoc_num = ctx->sa.assoc_num;
	struct mlx5e_macsec_sa *rx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	list = &macsec_device->macsec_rx_sc_list_head;
	rx_sc = mlx5e_macsec_get_rx_sc_from_sc_list(list, sci);
	if (!rx_sc) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld doesn't exist\n",
			   ctx->sa.rx_sa->sc->sci);
		err = -EINVAL;
		goto out;
	}

	rx_sa = rx_sc->rx_sa[assoc_num];
	if (!rx_sa) {
		netdev_err(ctx->netdev,
			   "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n",
			   sci, assoc_num);
		err = -EINVAL;
		goto out;
	}

	mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
	mlx5_destroy_encryption_key(macsec->mdev, rx_sa->enc_key_id);
	/* NOTE(review): plain kfree() here vs kfree_rcu() for TX SAs — RX
	 * SAs appear not to be reachable by RCU readers once the rule is
	 * gone; confirm against the datapath.
	 */
	kfree(rx_sa);
	rx_sc->rx_sa[assoc_num] = NULL;

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1085
/* macsec offload op: register a new offloaded MACsec device (secy).
 * Validates the secy features, enforces the supported-device limit and
 * adds a per-device context to the RCU list.
 */
static int mlx5e_macsec_add_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	const struct net_device *netdev = ctx->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec *macsec;
	int err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	if (mlx5e_macsec_get_macsec_device_context(macsec, ctx)) {
		netdev_err(netdev, "MACsec offload: MACsec net_device already exist\n");
		/* NOTE(review): err stays 0 here, so a duplicate add is
		 * reported as success — confirm this is intended.
		 */
		goto out;
	}

	if (macsec->num_of_devices >= MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES) {
		netdev_err(netdev, "Currently, only %d MACsec offload devices can be set\n",
			   MLX5_MACSEC_NUM_OF_SUPPORTED_INTERFACES);
		err = -EBUSY;
		goto out;
	}

	macsec_device = kzalloc(sizeof(*macsec_device), GFP_KERNEL);
	if (!macsec_device) {
		err = -ENOMEM;
		goto out;
	}

	macsec_device->dev_addr = kmemdup(dev->dev_addr, dev->addr_len, GFP_KERNEL);
	if (!macsec_device->dev_addr) {
		kfree(macsec_device);
		err = -ENOMEM;
		goto out;
	}

	macsec_device->netdev = dev;

	INIT_LIST_HEAD_RCU(&macsec_device->macsec_rx_sc_list_head);
	list_add_rcu(&macsec_device->macsec_device_list_element, &macsec->macsec_device_list_head);

	++macsec->num_of_devices;
out:
	mutex_unlock(&macsec->lock);

	return err;
}
1136
/* Re-create all Rx SA hardware contexts after a SecY update and refresh the
 * cached MAC address. Runs in two passes over every Rx SC of the device:
 * first tear down all installed SA rules, then re-install rules for the SAs
 * that are still active. Caller must hold macsec->lock.
 * Returns 0 on success or the error from mlx5e_macsec_init_sa().
 */
static int macsec_upd_secy_hw_address(struct macsec_context *ctx,
				      struct mlx5e_macsec_device *macsec_device)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *rx_sa;
	struct list_head *list;
	int i, err = 0;


	/* Pass 1: remove every installed Rx SA rule before re-creating any,
	 * so old and new steering state never coexist.
	 */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa || !rx_sa->macsec_rule)
				continue;

			mlx5e_macsec_cleanup_sa(macsec, rx_sa, false);
		}
	}

	/* Pass 2: re-install rules only for SAs that are still active. */
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element) {
		for (i = 0; i < MACSEC_NUM_AN; ++i) {
			rx_sa = rx_sc->rx_sa[i];
			if (!rx_sa)
				continue;

			if (rx_sa->active) {
				err = mlx5e_macsec_init_sa(ctx, rx_sa, true, false);
				if (err)
					goto out;
			}
		}
	}

	/* Only update the cached address once all SAs were re-created. */
	memcpy(macsec_device->dev_addr, dev->dev_addr, dev->addr_len);
out:
	return err;
}
1178
/* This function is called from two macsec ops callbacks:
 * macsec_set_mac_address - the MAC address was changed, therefore we need to
 * destroy and re-create the Tx contexts (macsec object + steering).
 * macsec_changelink - the Tx SC or SecY may have changed, therefore we need
 * to destroy and re-create both the Tx and Rx contexts (macsec object +
 * steering).
 */
static int mlx5e_macsec_upd_secy(struct macsec_context *ctx)
{
	const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	const struct net_device *dev = ctx->secy->netdev;
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	int i, err = 0;

	if (!mlx5e_macsec_secy_features_validate(ctx))
		return -EINVAL;

	mutex_lock(&priv->macsec->lock);

	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;
		goto out;
	}

	/* if the dev_addr hasn't changed, it means the callback came from
	 * macsec_changelink, so the Rx contexts are re-created as well.
	 * NOTE(review): this branch is taken when the cached address EQUALS
	 * the current one (memcmp == 0) — confirm against the comment above
	 * this function that this is the intended polarity.
	 */
	if (!memcmp(macsec_device->dev_addr, dev->dev_addr, dev->addr_len)) {
		err = macsec_upd_secy_hw_address(ctx, macsec_device);
		if (err)
			goto out;
	}

	/* Destroy all existing Tx SA contexts (macsec object + steering). */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
	}

	/* Re-create only the active encoding SA with the (possibly updated)
	 * encrypt setting from the Tx SC.
	 */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		if (tx_sa->assoc_num == tx_sc->encoding_sa && tx_sa->active) {
			err = mlx5e_macsec_init_sa(ctx, tx_sa, tx_sc->encrypt, true);
			if (err)
				goto out;
		}
	}

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1240
/* macsec ops callback: the SecY is being removed.
 * Frees every Tx SA (rule, HW object, DEK), destroys every Rx SC with its
 * SAs, then unlinks and frees the per-netdev device context.
 * Returns 0 on success, -EINVAL if no device context exists.
 */
static int mlx5e_macsec_del_secy(struct macsec_context *ctx)
{
	struct mlx5e_priv *priv = netdev_priv(ctx->netdev);
	struct mlx5e_macsec_device *macsec_device;
	struct mlx5e_macsec_rx_sc *rx_sc, *tmp;
	struct mlx5e_macsec_sa *tx_sa;
	struct mlx5e_macsec *macsec;
	struct list_head *list;
	int err = 0;
	int i;

	mutex_lock(&priv->macsec->lock);
	macsec = priv->macsec;
	macsec_device = mlx5e_macsec_get_macsec_device_context(macsec, ctx);
	if (!macsec_device) {
		netdev_err(ctx->netdev, "MACsec offload: Failed to find device context\n");
		err = -EINVAL;

		goto out;
	}

	/* Tear down all Tx SAs: steering rule + HW object, then DEK, then
	 * the SA itself.
	 */
	for (i = 0; i < MACSEC_NUM_AN; ++i) {
		tx_sa = macsec_device->tx_sa[i];
		if (!tx_sa)
			continue;

		mlx5e_macsec_cleanup_sa(macsec, tx_sa, true);
		mlx5_destroy_encryption_key(macsec->mdev, tx_sa->enc_key_id);
		kfree(tx_sa);
		macsec_device->tx_sa[i] = NULL;
	}

	/* Destroy every Rx SC (including its SAs and xarray entry). */
	list = &macsec_device->macsec_rx_sc_list_head;
	list_for_each_entry_safe(rx_sc, tmp, list, rx_sc_list_element)
		macsec_del_rxsc_ctx(macsec, rx_sc);

	kfree(macsec_device->dev_addr);
	macsec_device->dev_addr = NULL;

	list_del_rcu(&macsec_device->macsec_device_list_element);
	--macsec->num_of_devices;
	kfree(macsec_device);

out:
	mutex_unlock(&macsec->lock);

	return err;
}
1289
macsec_build_accel_attrs(struct mlx5e_macsec_sa * sa,struct mlx5_macsec_obj_attrs * attrs)1290 static void macsec_build_accel_attrs(struct mlx5e_macsec_sa *sa,
1291 struct mlx5_macsec_obj_attrs *attrs)
1292 {
1293 attrs->epn_state.epn_msb = sa->epn_state.epn_msb;
1294 attrs->epn_state.overlap = sa->epn_state.overlap;
1295 }
1296
/* Fill the ASO control segment of a MACsec WQE.
 * If the UMR buffer is mapped, point the segment at it with read-back enabled
 * so the device writes the ASO context there (consumed by macsec_aso_query()).
 * When @param is NULL, only the read-back setup is done (pure query); a
 * non-NULL @param additionally programs the condition/bitwise-data fields
 * (used to arm events). Field layouts follow the mlx5 HW spec: several
 * 4-bit/2-bit subfields are packed into single bytes via the shifts below.
 */
static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
					  struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
					  struct mlx5_aso_ctrl_param *param)
{
	memset(aso_ctrl, 0, sizeof(*aso_ctrl));
	if (macsec_aso->umr->dma_addr) {
		/* Low dword carries the read-enable bit in its low bits. */
		aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
		aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
		aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
	}

	if (!param)
		return;

	/* Two subfields per byte: operand/offset pairs share a byte, with the
	 * "0" half in the high nibble and the "1" half in the low nibble.
	 */
	aso_ctrl->data_mask_mode = param->data_mask_mode << 6;
	aso_ctrl->condition_1_0_operand = param->condition_1_operand |
						param->condition_0_operand << 4;
	aso_ctrl->condition_1_0_offset = param->condition_1_offset |
						param->condition_0_offset << 4;
	aso_ctrl->data_offset_condition_operand = param->data_offset |
						param->condition_operand << 6;
	aso_ctrl->condition_0_data = cpu_to_be32(param->condition_0_data);
	aso_ctrl->condition_0_mask = cpu_to_be32(param->condition_0_mask);
	aso_ctrl->condition_1_data = cpu_to_be32(param->condition_1_data);
	aso_ctrl->condition_1_mask = cpu_to_be32(param->condition_1_mask);
	aso_ctrl->bitwise_data = cpu_to_be64(param->bitwise_data);
	aso_ctrl->data_mask = cpu_to_be64(param->data_mask);
}
1325
/* Update the EPN fields (epn_msb, epn_overlap) of an existing HW MACsec
 * object. First queries the object to read modify_field_select and verify
 * both EPN fields are modifiable, then issues the modify command.
 * Returns 0 on success, -EOPNOTSUPP if the fields can't be modified, or the
 * firmware command error.
 */
static int mlx5e_macsec_modify_obj(struct mlx5_core_dev *mdev, struct mlx5_macsec_obj_attrs *attrs,
				   u32 macsec_id)
{
	u32 in[MLX5_ST_SZ_DW(modify_macsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_macsec_obj_out)];
	u64 modify_field_select = 0;
	void *obj;
	int err;

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_MACSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, macsec_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query MACsec object failed (Object id %d), err = %d\n",
			      macsec_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_macsec_obj_out, out, macsec_object);
	modify_field_select = MLX5_GET64(macsec_offload_obj, obj, modify_field_select);

	/* EPN */
	if (!(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB)) {
		mlx5_core_dbg(mdev, "MACsec object field is not modifiable (Object id %d)\n",
			      macsec_id);
		return -EOPNOTSUPP;
	}

	/* Reuse `in` (the cmd hdr fields are already populated) and switch
	 * the opcode from query to modify.
	 */
	obj = MLX5_ADDR_OF(modify_macsec_obj_in, in, macsec_object);
	MLX5_SET64(macsec_offload_obj, obj, modify_field_select,
		   MLX5_MODIFY_MACSEC_BITMASK_EPN_OVERLAP | MLX5_MODIFY_MACSEC_BITMASK_EPN_MSB);
	MLX5_SET(macsec_offload_obj, obj, epn_msb, attrs->epn_state.epn_msb);
	MLX5_SET(macsec_offload_obj, obj, epn_overlap, attrs->epn_state.overlap);

	/* General object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
1368
macsec_aso_build_ctrl(struct mlx5e_macsec_aso * aso,struct mlx5_wqe_aso_ctrl_seg * aso_ctrl,struct mlx5e_macsec_aso_in * in)1369 static void macsec_aso_build_ctrl(struct mlx5e_macsec_aso *aso,
1370 struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
1371 struct mlx5e_macsec_aso_in *in)
1372 {
1373 struct mlx5_aso_ctrl_param param = {};
1374
1375 param.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT;
1376 param.condition_0_operand = MLX5_ASO_ALWAYS_TRUE;
1377 param.condition_1_operand = MLX5_ASO_ALWAYS_TRUE;
1378 if (in->mode == MLX5_MACSEC_EPN) {
1379 param.data_offset = MLX5_MACSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
1380 param.bitwise_data = BIT_ULL(54);
1381 param.data_mask = param.bitwise_data;
1382 }
1383 macsec_aso_build_wqe_ctrl_seg(aso, aso_ctrl, ¶m);
1384 }
1385
/* Post a single ASO WQE that (re-)arms an event on a MACsec object and wait
 * for its completion. Serialized by aso_lock since the ASO SQ/CQ are shared.
 * Returns the mlx5_aso_poll_cq() result (0 on success).
 * NOTE(review): @mdev is currently unused here.
 */
static int macsec_aso_set_arm_event(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
				    struct mlx5e_macsec_aso_in *in)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	mutex_lock(&aso->aso_lock);
	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_ctrl(aso, &aso_wqe->aso_ctrl, in);
	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	err = mlx5_aso_poll_cq(maso, false);
	mutex_unlock(&aso->aso_lock);

	return err;
}
1408
/* Query the ASO context of a MACsec object.
 * Posts a read-back WQE (NULL ctrl param -> pure query into the UMR buffer)
 * and polls the CQ for up to ~10ms. On success, extracts the EPN arm-event
 * flag and the mode parameter from the DMA'd context into @out.
 * Returns 0 on success or the last mlx5_aso_poll_cq() error on timeout.
 */
static int macsec_aso_query(struct mlx5_core_dev *mdev, struct mlx5e_macsec *macsec,
			    struct mlx5e_macsec_aso_in *in, struct mlx5e_macsec_aso_out *out)
{
	struct mlx5e_macsec_aso *aso;
	struct mlx5_aso_wqe *aso_wqe;
	struct mlx5_aso *maso;
	unsigned long expires;
	int err;

	aso = &macsec->aso;
	maso = aso->maso;

	/* aso_lock also protects the shared UMR buffer being read below. */
	mutex_lock(&aso->aso_lock);

	aso_wqe = mlx5_aso_get_wqe(maso);
	mlx5_aso_build_wqe(maso, MLX5_MACSEC_ASO_DS_CNT, aso_wqe, in->obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_MACSEC);
	macsec_aso_build_wqe_ctrl_seg(aso, &aso_wqe->aso_ctrl, NULL);

	mlx5_aso_post_wqe(maso, false, &aso_wqe->ctrl);
	/* Bounded busy-wait: retry the CQ poll for up to 10ms. */
	expires = jiffies + msecs_to_jiffies(10);
	do {
		err = mlx5_aso_poll_cq(maso, false);
		if (err)
			usleep_range(2, 10);
	} while (err && time_is_after_jiffies(expires));

	if (err)
		goto err_out;

	if (MLX5_GET(macsec_aso, aso->umr->ctx, epn_event_arm))
		out->event_arm |= MLX5E_ASO_EPN_ARM;

	out->mode_param = MLX5_GET(macsec_aso, aso->umr->ctx, mode_parameter);

err_out:
	mutex_unlock(&aso->aso_lock);
	return err;
}
1448
get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1449 static struct mlx5e_macsec_sa *get_macsec_tx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1450 const u32 obj_id)
1451 {
1452 const struct list_head *device_list;
1453 struct mlx5e_macsec_sa *macsec_sa;
1454 struct mlx5e_macsec_device *iter;
1455 int i;
1456
1457 device_list = &macsec->macsec_device_list_head;
1458
1459 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1460 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1461 macsec_sa = iter->tx_sa[i];
1462 if (!macsec_sa || !macsec_sa->active)
1463 continue;
1464 if (macsec_sa->macsec_obj_id == obj_id)
1465 return macsec_sa;
1466 }
1467 }
1468
1469 return NULL;
1470 }
1471
get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec * macsec,const u32 obj_id)1472 static struct mlx5e_macsec_sa *get_macsec_rx_sa_from_obj_id(const struct mlx5e_macsec *macsec,
1473 const u32 obj_id)
1474 {
1475 const struct list_head *device_list, *sc_list;
1476 struct mlx5e_macsec_rx_sc *mlx5e_rx_sc;
1477 struct mlx5e_macsec_sa *macsec_sa;
1478 struct mlx5e_macsec_device *iter;
1479 int i;
1480
1481 device_list = &macsec->macsec_device_list_head;
1482
1483 list_for_each_entry(iter, device_list, macsec_device_list_element) {
1484 sc_list = &iter->macsec_rx_sc_list_head;
1485 list_for_each_entry(mlx5e_rx_sc, sc_list, rx_sc_list_element) {
1486 for (i = 0; i < MACSEC_NUM_AN; ++i) {
1487 macsec_sa = mlx5e_rx_sc->rx_sa[i];
1488 if (!macsec_sa || !macsec_sa->active)
1489 continue;
1490 if (macsec_sa->macsec_obj_id == obj_id)
1491 return macsec_sa;
1492 }
1493 }
1494 }
1495
1496 return NULL;
1497 }
1498
/* Advance the SA's EPN tracking state after an EPN event fired, push the new
 * state to the HW object, and re-arm the EPN event.
 * Caller must hold macsec->lock.
 */
static void macsec_epn_update(struct mlx5e_macsec *macsec, struct mlx5_core_dev *mdev,
			      struct mlx5e_macsec_sa *sa, u32 obj_id, u32 mode_param)
{
	struct mlx5_macsec_obj_attrs attrs = {};
	struct mlx5e_macsec_aso_in in = {};

	/* When the bottom of the replay protection window (mode_param) crosses 2^31 (half sequence
	 * number wraparound) hence mode_param > MLX5_MACSEC_EPN_SCOPE_MID the SW should update the
	 * esn_overlap to OLD (1).
	 * When the bottom of the replay protection window (mode_param) crosses 2^32 (full sequence
	 * number wraparound) hence mode_param < MLX5_MACSEC_EPN_SCOPE_MID since it did a
	 * wraparound, the SW should update the esn_overlap to NEW (0), and increment the esn_msb.
	 */

	if (mode_param < MLX5_MACSEC_EPN_SCOPE_MID) {
		sa->epn_state.epn_msb++;
		sa->epn_state.overlap = 0;
	} else {
		sa->epn_state.overlap = 1;
	}

	/* NOTE(review): the return values of the modify and re-arm calls below
	 * are ignored; a failure leaves HW and SW EPN state out of sync.
	 */
	macsec_build_accel_attrs(sa, &attrs);
	mlx5e_macsec_modify_obj(mdev, &attrs, obj_id);

	/* Re-set EPN arm event */
	in.obj_id = obj_id;
	in.mode = MLX5_MACSEC_EPN;
	macsec_aso_set_arm_event(mdev, macsec, &in);
}
1528
/* Workqueue handler for MACsec object-change events (queued by
 * macsec_obj_change_event()). Resolves the object id to a Tx or Rx SA,
 * queries the ASO context, and if EPN is enabled and the EPN arm bit was
 * consumed, advances the EPN state. Frees the work item it was given.
 */
static void macsec_async_event(struct work_struct *work)
{
	struct mlx5e_macsec_async_work *async_work;
	struct mlx5e_macsec_aso_out out = {};
	struct mlx5e_macsec_aso_in in = {};
	struct mlx5e_macsec_sa *macsec_sa;
	struct mlx5e_macsec *macsec;
	struct mlx5_core_dev *mdev;
	u32 obj_id;

	async_work = container_of(work, struct mlx5e_macsec_async_work, work);
	macsec = async_work->macsec;
	/* Lock guards the SA lookup and EPN state against concurrent ops. */
	mutex_lock(&macsec->lock);

	mdev = async_work->mdev;
	obj_id = async_work->obj_id;
	/* The object id may belong to either direction; try Tx then Rx. */
	macsec_sa = get_macsec_tx_sa_from_obj_id(macsec, obj_id);
	if (!macsec_sa) {
		macsec_sa = get_macsec_rx_sa_from_obj_id(macsec, obj_id);
		if (!macsec_sa) {
			/* SA may already be gone; event raced with deletion. */
			mlx5_core_dbg(mdev, "MACsec SA is not found (SA object id %d)\n", obj_id);
			goto out_async_work;
		}
	}

	/* Query MACsec ASO context */
	in.obj_id = obj_id;
	macsec_aso_query(mdev, macsec, &in, &out);

	/* EPN case */
	if (macsec_sa->epn_state.epn_enabled && !(out.event_arm & MLX5E_ASO_EPN_ARM))
		macsec_epn_update(macsec, mdev, macsec_sa, obj_id, out.mode_param);

out_async_work:
	kfree(async_work);
	mutex_unlock(&macsec->lock);
}
1566
macsec_obj_change_event(struct notifier_block * nb,unsigned long event,void * data)1567 static int macsec_obj_change_event(struct notifier_block *nb, unsigned long event, void *data)
1568 {
1569 struct mlx5e_macsec *macsec = container_of(nb, struct mlx5e_macsec, nb);
1570 struct mlx5e_macsec_async_work *async_work;
1571 struct mlx5_eqe_obj_change *obj_change;
1572 struct mlx5_eqe *eqe = data;
1573 u16 obj_type;
1574 u32 obj_id;
1575
1576 if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
1577 return NOTIFY_DONE;
1578
1579 obj_change = &eqe->data.obj_change;
1580 obj_type = be16_to_cpu(obj_change->obj_type);
1581 obj_id = be32_to_cpu(obj_change->obj_id);
1582
1583 if (obj_type != MLX5_GENERAL_OBJECT_TYPES_MACSEC)
1584 return NOTIFY_DONE;
1585
1586 async_work = kzalloc(sizeof(*async_work), GFP_ATOMIC);
1587 if (!async_work)
1588 return NOTIFY_DONE;
1589
1590 async_work->macsec = macsec;
1591 async_work->mdev = macsec->mdev;
1592 async_work->obj_id = obj_id;
1593
1594 INIT_WORK(&async_work->work, macsec_async_event);
1595
1596 WARN_ON(!queue_work(macsec->wq, &async_work->work));
1597
1598 return NOTIFY_OK;
1599 }
1600
/* Set up the MACsec ASO resources: a PD, the ASO SQ/CQ pair, and a UMR
 * memory region for reading back ASO contexts. Unwinds in reverse order on
 * failure. Returns 0 on success or a negative errno.
 */
static int mlx5e_macsec_aso_init(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	struct mlx5_aso *maso;
	int err;

	err = mlx5_core_alloc_pd(mdev, &aso->pdn);
	if (err) {
		mlx5_core_err(mdev,
			      "MACsec offload: Failed to alloc pd for MACsec ASO, err=%d\n",
			      err);
		return err;
	}

	maso = mlx5_aso_create(mdev, aso->pdn);
	if (IS_ERR(maso)) {
		err = PTR_ERR(maso);
		goto err_aso;
	}

	err = mlx5e_macsec_aso_reg_mr(mdev, aso);
	if (err)
		goto err_aso_reg;

	mutex_init(&aso->aso_lock);

	/* Publish the ASO handle only after everything succeeded. */
	aso->maso = maso;

	return 0;

err_aso_reg:
	mlx5_aso_destroy(maso);
err_aso:
	mlx5_core_dealloc_pd(mdev, aso->pdn);
	return err;
}
1636
/* Release the ASO resources in reverse order of mlx5e_macsec_aso_init():
 * UMR memory region, ASO SQ/CQ, then the PD. NULL @aso is tolerated.
 */
static void mlx5e_macsec_aso_cleanup(struct mlx5e_macsec_aso *aso, struct mlx5_core_dev *mdev)
{
	if (!aso)
		return;

	mlx5e_macsec_aso_dereg_mr(mdev, aso);

	mlx5_aso_destroy(aso->maso);

	mlx5_core_dealloc_pd(mdev, aso->pdn);
}
1648
mlx5e_is_macsec_device(const struct mlx5_core_dev * mdev)1649 bool mlx5e_is_macsec_device(const struct mlx5_core_dev *mdev)
1650 {
1651 if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
1652 MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD))
1653 return false;
1654
1655 if (!MLX5_CAP_GEN(mdev, log_max_dek))
1656 return false;
1657
1658 if (!MLX5_CAP_MACSEC(mdev, log_max_macsec_offload))
1659 return false;
1660
1661 if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, macsec_decrypt) ||
1662 !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_remove_macsec))
1663 return false;
1664
1665 if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, macsec_encrypt) ||
1666 !MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_macsec))
1667 return false;
1668
1669 if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_encrypt) &&
1670 !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_encrypt))
1671 return false;
1672
1673 if (!MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_128_decrypt) &&
1674 !MLX5_CAP_MACSEC(mdev, macsec_crypto_esp_aes_gcm_256_decrypt))
1675 return false;
1676
1677 return true;
1678 }
1679
/* Delegate stats collection to the steering layer, which fills the
 * caller-provided stats buffer.
 */
void mlx5e_macsec_get_stats_fill(struct mlx5e_macsec *macsec, void *macsec_stats)
{
	mlx5e_macsec_fs_get_stats_fill(macsec->macsec_fs, macsec_stats);
}
1684
mlx5e_macsec_get_stats(struct mlx5e_macsec * macsec)1685 struct mlx5e_macsec_stats *mlx5e_macsec_get_stats(struct mlx5e_macsec *macsec)
1686 {
1687 if (!macsec)
1688 return NULL;
1689
1690 return &macsec->stats;
1691 }
1692
/* MACsec offload callbacks installed on the netdev in
 * mlx5e_macsec_build_netdev().
 */
static const struct macsec_ops macsec_offload_ops = {
	.mdo_add_txsa = mlx5e_macsec_add_txsa,
	.mdo_upd_txsa = mlx5e_macsec_upd_txsa,
	.mdo_del_txsa = mlx5e_macsec_del_txsa,
	.mdo_add_rxsc = mlx5e_macsec_add_rxsc,
	.mdo_upd_rxsc = mlx5e_macsec_upd_rxsc,
	.mdo_del_rxsc = mlx5e_macsec_del_rxsc,
	.mdo_add_rxsa = mlx5e_macsec_add_rxsa,
	.mdo_upd_rxsa = mlx5e_macsec_upd_rxsa,
	.mdo_del_rxsa = mlx5e_macsec_del_rxsa,
	.mdo_add_secy = mlx5e_macsec_add_secy,
	.mdo_upd_secy = mlx5e_macsec_upd_secy,
	.mdo_del_secy = mlx5e_macsec_del_secy,
};
1707
mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec * macsec,struct sk_buff * skb)1708 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
1709 {
1710 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1711 u32 fs_id;
1712
1713 fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1714 if (!fs_id)
1715 goto err_out;
1716
1717 return true;
1718
1719 err_out:
1720 dev_kfree_skb_any(skb);
1721 return false;
1722 }
1723
mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec * macsec,struct sk_buff * skb,struct mlx5_wqe_eth_seg * eseg)1724 void mlx5e_macsec_tx_build_eseg(struct mlx5e_macsec *macsec,
1725 struct sk_buff *skb,
1726 struct mlx5_wqe_eth_seg *eseg)
1727 {
1728 struct metadata_dst *md_dst = skb_metadata_dst(skb);
1729 u32 fs_id;
1730
1731 fs_id = mlx5e_macsec_get_sa_from_hashtable(&macsec->sci_hash, &md_dst->u.macsec_info.sci);
1732 if (!fs_id)
1733 return;
1734
1735 eseg->flow_table_metadata = cpu_to_be32(MLX5_ETH_WQE_FT_META_MACSEC | fs_id << 2);
1736 }
1737
mlx5e_macsec_offload_handle_rx_skb(struct net_device * netdev,struct sk_buff * skb,struct mlx5_cqe64 * cqe)1738 void mlx5e_macsec_offload_handle_rx_skb(struct net_device *netdev,
1739 struct sk_buff *skb,
1740 struct mlx5_cqe64 *cqe)
1741 {
1742 struct mlx5e_macsec_rx_sc_xarray_element *sc_xarray_element;
1743 u32 macsec_meta_data = be32_to_cpu(cqe->ft_metadata);
1744 struct mlx5e_priv *priv = netdev_priv(netdev);
1745 struct mlx5e_macsec_rx_sc *rx_sc;
1746 struct mlx5e_macsec *macsec;
1747 u32 fs_id;
1748
1749 macsec = priv->macsec;
1750 if (!macsec)
1751 return;
1752
1753 fs_id = MLX5_MACSEC_RX_METADAT_HANDLE(macsec_meta_data);
1754
1755 rcu_read_lock();
1756 sc_xarray_element = xa_load(&macsec->sc_xarray, fs_id);
1757 rx_sc = sc_xarray_element->rx_sc;
1758 if (rx_sc) {
1759 dst_hold(&rx_sc->md_dst->dst);
1760 skb_dst_set(skb, &rx_sc->md_dst->dst);
1761 }
1762
1763 rcu_read_unlock();
1764 }
1765
mlx5e_macsec_build_netdev(struct mlx5e_priv * priv)1766 void mlx5e_macsec_build_netdev(struct mlx5e_priv *priv)
1767 {
1768 struct net_device *netdev = priv->netdev;
1769
1770 if (!mlx5e_is_macsec_device(priv->mdev))
1771 return;
1772
1773 /* Enable MACsec */
1774 mlx5_core_dbg(priv->mdev, "mlx5e: MACsec acceleration enabled\n");
1775 netdev->macsec_ops = &macsec_offload_ops;
1776 netdev->features |= NETIF_F_HW_MACSEC;
1777 netif_keep_dst(netdev);
1778 }
1779
/* Initialize MACsec offload for a netdev: device list, lock, SCI hash
 * table, ASO resources, ordered workqueue, SC xarray, steering (FS) layer,
 * and the object-change event notifier. Unwinds in reverse order on
 * failure. Returns 0 (also when the device lacks MACsec support) or a
 * negative errno.
 */
int mlx5e_macsec_init(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_macsec *macsec = NULL;
	struct mlx5e_macsec_fs *macsec_fs;
	int err;

	if (!mlx5e_is_macsec_device(priv->mdev)) {
		mlx5_core_dbg(mdev, "Not a MACsec offload device\n");
		return 0;
	}

	macsec = kzalloc(sizeof(*macsec), GFP_KERNEL);
	if (!macsec)
		return -ENOMEM;

	INIT_LIST_HEAD(&macsec->macsec_device_list_head);
	mutex_init(&macsec->lock);

	err = rhashtable_init(&macsec->sci_hash, &rhash_sci);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init SCI hash table, err=%d\n",
			      err);
		goto err_hash;
	}

	err = mlx5e_macsec_aso_init(&macsec->aso, priv->mdev);
	if (err) {
		mlx5_core_err(mdev, "MACsec offload: Failed to init aso, err=%d\n", err);
		goto err_aso;
	}

	/* Ordered workqueue: object-change events are handled serially. */
	macsec->wq = alloc_ordered_workqueue("mlx5e_macsec_%s", 0, priv->netdev->name);
	if (!macsec->wq) {
		err = -ENOMEM;
		goto err_wq;
	}

	/* ALLOC1: fs_id 0 is reserved as "not found" in the Tx fast path. */
	xa_init_flags(&macsec->sc_xarray, XA_FLAGS_ALLOC1);

	priv->macsec = macsec;

	macsec->mdev = mdev;

	macsec_fs = mlx5e_macsec_fs_init(mdev, priv->netdev);
	if (!macsec_fs) {
		err = -ENOMEM;
		goto err_out;
	}

	macsec->macsec_fs = macsec_fs;

	/* Register for object-change EQEs only once everything is ready. */
	macsec->nb.notifier_call = macsec_obj_change_event;
	mlx5_notifier_register(mdev, &macsec->nb);

	mlx5_core_dbg(mdev, "MACsec attached to netdevice\n");

	return 0;

err_out:
	destroy_workqueue(macsec->wq);
err_wq:
	mlx5e_macsec_aso_cleanup(&macsec->aso, priv->mdev);
err_aso:
	rhashtable_destroy(&macsec->sci_hash);
err_hash:
	kfree(macsec);
	/* priv->macsec was published above; clear it on failure. */
	priv->macsec = NULL;
	return err;
}
1850
/* Tear down MACsec offload: stop new events (notifier), then steering,
 * flush/destroy the workqueue, ASO resources, SCI hash, lock, and the
 * context itself. No-op when MACsec was never initialized.
 */
void mlx5e_macsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_macsec *macsec = priv->macsec;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!macsec)
		return;

	/* Unregister first so no new work items get queued. */
	mlx5_notifier_unregister(mdev, &macsec->nb);
	mlx5e_macsec_fs_cleanup(macsec->macsec_fs);
	/* destroy_workqueue() flushes any already-queued async events. */
	destroy_workqueue(macsec->wq);
	mlx5e_macsec_aso_cleanup(&macsec->aso, mdev);
	rhashtable_destroy(&macsec->sci_hash);
	mutex_destroy(&macsec->lock);
	kfree(macsec);
}
1867