/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>

#include "en.h"
#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"

#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1

static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

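/* Delayed work that drives the software lifetime checks for an offloaded SA:
 * it lets the XFRM core re-evaluate expiration and, once the state expires,
 * switches the hardware rule to drop mode.
 */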
static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct xfrm_state *x = sa_entry->x;

	if (sa_entry->attrs.drop)
		return;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_EXPIRED) {
		sa_entry->attrs.drop = true;
		spin_unlock_bh(&x->lock);

		mlx5e_accel_ipsec_fs_modify(sa_entry);
		return;
	}

	if (x->km.state != XFRM_STATE_VALID) {
		spin_unlock_bh(&x->lock);
		return;
	}

	xfrm_state_check_expire(x);
	spin_unlock_bh(&x->lock);

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
			   MLX5_IPSEC_RESCHED);
}

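/* Snapshot the current ESN state from the XFRM state into sa_entry and report
 * whether the overlap flag flipped, i.e. whether the hardware context needs
 * to be reprogrammed with the new ESN MSB.
 */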
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	u32 seq_bottom = 0;
	u32 esn, esn_msb;
	u8 overlap;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		switch (x->xso.dir) {
		case XFRM_DEV_OFFLOAD_IN:
			esn = x->replay_esn->seq;
			esn_msb = x->replay_esn->seq_hi;
			break;
		case XFRM_DEV_OFFLOAD_OUT:
			esn = x->replay_esn->oseq;
			esn_msb = x->replay_esn->oseq_hi;
			break;
		default:
			WARN_ON(true);
			return false;
		}
		break;
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Already parsed by XFRM core */
		esn = x->replay_esn->seq;
		break;
	default:
		WARN_ON(true);
		return false;
	}

	overlap = sa_entry->esn_state.overlap;

	if (esn >= x->replay_esn->replay_window)
		seq_bottom = esn - x->replay_esn->replay_window + 1;

	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));

	if (sa_entry->esn_state.esn_msb)
		sa_entry->esn_state.esn = esn;
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = max_t(u32, esn, 1);
	sa_entry->esn_state.esn_msb = esn_msb;

	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	s64 start_value, n;

	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Compute hard limit initial value and number of rounds.
	 *
	 * The counting pattern of hardware counter goes:
	 *                 value -> 2^31-1
	 *      2^31 | (2^31-1)  -> 2^31-1
	 *      2^31 | (2^31-1)  -> 2^31-1
	 *      [..]
	 *      2^31 | (2^31-1)  -> 0
	 *
	 * The pattern is created by using an ASO operation to atomically set
	 * bit 31 after the down counter clears bit 31. This is effectively an
	 * atomic addition of 2**31 to the counter.
	 *
	 * We wish to configure the counter, within the above pattern, so that
	 * when it reaches 0, it has hit the hard limit. This is defined by this
	 * system of equations:
	 *
	 *      hard_limit == start_value + n * 2^31
	 *      n >= 0
	 *      start_value < 2^32, start_value >= 0
	 *
	 * These equations are not single-solution, there are often two choices:
	 *      hard_limit == start_value + n * 2^31
	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
	 *
	 * The algorithm selects the solution that keeps the counter value
	 * above 2^31 until the final iteration.
	 */

	/* Start by estimating n and compute start_value */
	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* Choose the best of the two solutions: */
	if (n >= 1)
		n -= 1;

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* The best solution means: when there are multiple iterations we must
	 * start above 2^31 and count down to 2**31 to get the interrupt.
	 */
	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_hard = (u64)n;
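	/* Example: hard_packet_limit == 3 * 2^31 + 100 gives n == 3 and
	 * start_value == 100 on the first pass; the code then picks the other
	 * solution (n == 2, start_value == 2^31 + 100), which keeps the
	 * programmed counter above 2^31 for all but the last round.
	 */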

	/* Compute soft limit initial value and number of rounds.
	 *
	 * The soft_limit is achieved by adjusting the counter's
	 * interrupt_value. This is embedded in the counting pattern created by
	 * hard packet calculations above.
	 *
	 * We wish to compute the interrupt_value for the soft_limit. This is
	 * defined by this system of equations:
	 *
	 *      soft_limit == start_value - soft_value + n * 2^31
	 *      n >= 0
	 *      soft_value < 2^32, soft_value >= 0
	 *      for n == 0 start_value > soft_value
	 *
	 * As with the hard limit computation above, the equations are not
	 * single-solution. The algorithm selects the solution that has:
	 *      2^30 <= soft_limit < 2^31 + 2^30
	 * for the interior iterations, which guarantees a large guard band
	 * around the counter hard limit and next interrupt.
	 */

	/* Start by estimating n and compute soft_value */
	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* Compare against constraints and adjust n */
	if (n < 0)
		n = 0;
	else if (start_value >= BIT_ULL(32))
		n -= 1;
	else if (start_value < 0)
		n += 1;

	/* Choose the best of the two solutions: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
		n += 1;

	/* Note that the upper limit of soft_value happens naturally because we
	 * always select the lowest soft_value.
	 */

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;

	/* The best solution means: when there are multiple iterations we must
	 * not fall below 2^30 as that would get too close to the false
	 * hard_limit and when we reach an interior iteration for soft_limit it
	 * has to be far away from 2**32-1 which is the counter reset point
	 * after the +2^31 to accommodate latency.
	 */
	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_soft = (u64)n;
}

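/* Tunnel mode packet offload needs L2 addresses for the outer header: use the
 * device MAC for the local side and resolve the peer through the neighbour
 * table. If the neighbour is not resolved yet, kick off resolution and keep
 * the SA in drop mode until the netevent handler supplies the address.
 */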
static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
				  struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct net_device *netdev = sa_entry->dev;
	struct neighbour *n;
	u8 addr[ETH_ALEN];
	const void *pkey;
	u8 *dst, *src;

	if (attrs->mode != XFRM_MODE_TUNNEL ||
	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	mlx5_query_mac_address(mdev, addr);
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		src = attrs->dmac;
		dst = attrs->smac;
		pkey = &attrs->saddr.a4;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		src = attrs->smac;
		dst = attrs->dmac;
		pkey = &attrs->daddr.a4;
		break;
	default:
		return;
	}

	ether_addr_copy(src, addr);
	n = neigh_lookup(&arp_tbl, pkey, netdev);
	if (!n) {
		n = neigh_create(&arp_tbl, pkey, netdev);
		if (IS_ERR(n))
			return;
		neigh_event_send(n, NULL);
		attrs->drop = true;
	} else {
		neigh_ha_snapshot(addr, n, netdev);
		ether_addr_copy(dst, addr);
	}
	neigh_release(n);
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* iv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	attrs->dir = x->xso.dir;

	/* esn */
	if (x->props.flags & XFRM_STATE_ESN) {
		attrs->replay_esn.trigger = true;
		attrs->replay_esn.esn = sa_entry->esn_state.esn;
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
			goto skip_replay_window;

		switch (x->replay_esn->replay_window) {
		case 32:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
			break;
		case 64:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
			break;
		case 128:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
			break;
		case 256:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
			break;
		default:
			WARN_ON(true);
			return;
		}
	}

skip_replay_window:
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;
	attrs->mode = x->props.mode;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
	mlx5e_ipsec_init_macs(sa_entry, attrs);

	if (x->encap) {
		attrs->encap = true;
		attrs->sport = x->encap->encap_sport;
		attrs->dport = x->encap->encap_dport;
	}
}

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
			return -EINVAL;
		}

		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
			return -EINVAL;
		}

		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
			return -EINVAL;
		}
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
		return -EINVAL;
	}

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode == XFRM_MODE_TUNNEL &&
		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
			return -EINVAL;
		}

		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
		    x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
		    x->lft.hard_byte_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}

static void mlx5e_ipsec_modify_state(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;

	mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
}

static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;

	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
	    x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return;

	if (x->props.flags & XFRM_STATE_ESN) {
		sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
		return;
	}

	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
}

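/* Work handler run when a netevent reports a new L2 address for one of the
 * tunnel SAs: refresh the cached MAC in the offload attributes and clear the
 * drop flag that was set while the neighbour was unresolved.
 */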
static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5e_ipsec_netevent_data *data = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &sa_entry->attrs;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		ether_addr_copy(attrs->smac, data->addr);
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		ether_addr_copy(attrs->dmac, data->addr);
		break;
	default:
		WARN_ON_ONCE(true);
	}
	attrs->drop = false;
	mlx5e_accel_ipsec_fs_modify(sa_entry);
}

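/* Allocate the per-SA work item that is later used either by the ESN advance
 * path (crypto offload with ESN) or by the netevent handler (tunnel mode
 * packet offload); SAs that need neither get no work item at all.
 */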
static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_work *work;
	void *data = NULL;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(x->props.flags & XFRM_STATE_ESN))
			return 0;
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (x->props.mode != XFRM_MODE_TUNNEL)
			return 0;
		break;
	default:
		break;
	}

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
			       GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
		break;
	default:
		break;
	}

	work->data = data;
	work->sa_entry = sa_entry;
	sa_entry->work = work;
	return 0;

free_work:
	kfree(work);
	return -ENOMEM;
}

static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_dwork *dwork;

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return 0;

	if (x->lft.soft_packet_limit == XFRM_INF &&
	    x->lft.hard_packet_limit == XFRM_INF &&
	    x->lft.soft_byte_limit == XFRM_INF &&
	    x->lft.hard_byte_limit == XFRM_INF)
		return 0;

	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
	if (!dwork)
		return -ENOMEM;

	dwork->sa_entry = sa_entry;
	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
	sa_entry->dwork = dwork;
	return 0;
}

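/* .xdo_dev_state_add callback: validate the state, build the offload
 * attributes, create the hardware SA context and steering rules, and register
 * the entry in the sadb xarray. States coming from the acquire flow
 * (XFRM_DEV_OFFLOAD_FLAG_ACQ) only get a software entry.
 */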
static int mlx5e_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	gfp_t gfp;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
	sa_entry = kzalloc(sizeof(*sa_entry), gfp);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->dev = netdev;
	sa_entry->ipsec = ipsec;
	/* Check whether this SA originated from an acquire flow temporary SA */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto out;

	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		goto err_xfrm;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto err_xfrm;
	}

	/* check esn */
	if (x->props.flags & XFRM_STATE_ESN)
		mlx5e_ipsec_update_esn_state(sa_entry);
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = 1;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	err = mlx5_ipsec_create_work(sa_entry);
	if (err)
		goto unblock_ipsec;

	err = mlx5e_ipsec_create_dwork(sa_entry);
	if (err)
		goto release_work;

	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto release_dwork;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->props.mode == XFRM_MODE_TUNNEL &&
	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
		err = -EINVAL;
		goto err_add_rule;
	}

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	mlx5e_ipsec_set_esn_ops(sa_entry);

	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
				   MLX5_IPSEC_RESCHED);

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    x->props.mode == XFRM_MODE_TUNNEL) {
		xa_lock_bh(&ipsec->sadb);
		__xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
			      MLX5E_IPSEC_TUNNEL_SA);
		xa_unlock_bh(&ipsec->sadb);
	}

out:
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
	kfree(sa_entry->dwork);
release_work:
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
unblock_ipsec:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
	return err;
}

static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);
}

static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto sa_entry_free;

	if (sa_entry->work)
		cancel_work_sync(&sa_entry->work->work);

	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
	kfree(sa_entry);
}

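/* Netevent notifier: on a neighbour update that matches the source or
 * destination address of a tunnel mode SA, snapshot the new hardware address
 * and queue the per-SA work to refresh the offloaded rule.
 */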
static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_netevent_data *data;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec *ipsec;
	struct neighbour *n = ptr;
	unsigned long idx;

	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
		return NOTIFY_DONE;

	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
		attrs = &sa_entry->attrs;

		if (attrs->family == AF_INET) {
			if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
			    !neigh_key_eq32(n, &attrs->daddr.a4))
				continue;
		} else {
			if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
			    !neigh_key_eq128(n, &attrs->daddr.a4))
				continue;
		}

		data = sa_entry->work->data;

		neigh_ha_snapshot(data->addr, n, sa_entry->dev);
		queue_work(ipsec->wq, &sa_entry->work->work);
	}

	return NOTIFY_DONE;
}

void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	init_completion(&ipsec->comp);
	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
				    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
		ret = register_netevent_notifier(&ipsec->netevent_nb);
		if (ret)
			goto clear_aso;
	}

	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
	ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
		unregister_netevent_notifier(&ipsec->netevent_nb);
clear_aso:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (ipsec->netevent_nb.notifier_call) {
		unregister_netevent_notifier(&ipsec->netevent_nb);
		ipsec->netevent_nb.notifier_call = NULL;
	}
	if (ipsec->aso)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_work *work = sa_entry->work;
	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	sa_entry_shadow = work->data;
	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
	queue_work(sa_entry->ipsec->wq, &work->work);
}

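/* .xdo_dev_state_update_stats callback: fold the cached hardware flow
 * counters (auth/trailer/replay drops and total traffic) back into the XFRM
 * state statistics and lifetime counters.
 */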
static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct net *net = dev_net(x->xso.dev);
	u64 trailer_packets = 0, trailer_bytes = 0;
	u64 replay_packets = 0, replay_bytes = 0;
	u64 auth_packets = 0, auth_bytes = 0;
	u64 success_packets, success_bytes;
	u64 packets, bytes, lastuse;
	size_t headers;

	lockdep_assert(lockdep_is_held(&x->lock) ||
		       lockdep_is_held(&net->xfrm.xfrm_cfg_mutex) ||
		       lockdep_is_held(&net->xfrm.xfrm_state_lock));

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
				     &auth_packets, &lastuse);
		x->stats.integrity_failed += auth_packets;
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);

		mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
				     &trailer_packets, &lastuse);
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
	}

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
				     &replay_packets, &lastuse);
		x->stats.replay += replay_packets;
		XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
	}

	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
	success_packets = packets - auth_packets - trailer_packets - replay_packets;
	x->curlft.packets += success_packets;
	/* NIC counts all bytes passed through flow steering and doesn't have
	 * the ability to count payload data size which is needed for the SA.
	 *
	 * To overcome this HW limitation, let's approximate the payload size
	 * by removing always-present headers.
	 */
	headers = sizeof(struct ethhdr);
	if (sa_entry->attrs.family == AF_INET)
		headers += sizeof(struct iphdr);
	else
		headers += sizeof(struct ipv6hdr);

	success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
	x->curlft.bytes += success_bytes - headers * success_packets;
}

static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
				      struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	struct xfrm_selector *sel = &x->selector;

	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Note that only a single template is supported */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	if (x->selector.proto != IPPROTO_IP &&
	    x->selector.proto != IPPROTO_UDP &&
	    x->selector.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->priority) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
			return -EINVAL;
		}

		if (x->priority == U32_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
			return -EINVAL;
		}
	}

	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
		return -EINVAL;
	}

	return 0;
}

static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
	attrs->family = sel->family;
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
	attrs->upspec.dport = ntohs(sel->dport);
	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
	attrs->upspec.sport = ntohs(sel->sport);
	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
	attrs->upspec.proto = sel->proto;
	attrs->prio = x->priority;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto ipsec_busy;
	}

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
ipsec_busy:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}

static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	kfree(pol_entry);
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add = mlx5e_xfrm_add_state,
	.xdo_dev_state_delete = mlx5e_xfrm_del_state,
	.xdo_dev_state_free = mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
	.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

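/* Advertise ESP offload features on the netdev according to device
 * capabilities: base ESP offload first, then SWP checksum and ESP GSO when
 * the NIC supports them.
 */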
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}