/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec_fs.h"

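/* Resolve the driver SA entry stashed in the xfrm state's offload handle;
 * returns NULL if the state has no offload context.
 */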
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa;

	if (!x)
		return NULL;

	sa = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	if (!sa)
		return NULL;

	WARN_ON(sa->x != x);
	return sa;
}

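/* RX SADB lookup: map a hardware SA handle back to its xfrm state.
 * Takes a reference on the returned state; the caller must release it
 * with xfrm_state_put().
 */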
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
					      unsigned int handle)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
		if (sa_entry->handle == handle) {
			ret = sa_entry->x;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();

	return ret;
}

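/* Insert an SA entry into the RX SADB hash, keyed by the hardware handle.
 * Fails with -EEXIST if the handle is already present.
 */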
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry,
				   unsigned int handle)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *_sa_entry;
	unsigned long flags;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
		if (_sa_entry->handle == handle) {
			rcu_read_unlock();
			return -EEXIST;
		}
	rcu_read_unlock();

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	sa_entry->handle = handle;
	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

	return 0;
}

static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	unsigned long flags;

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	hash_del_rcu(&sa_entry->hlist);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

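/* Recompute the high bits of the ESN from the replay window, and flip the
 * overlap bit when the bottom of the window crosses the half-sequence-space
 * boundary (MLX5E_IPSEC_ESN_SCOPE_MID). Returns true when the hardware
 * context needs to be updated.
 */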
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_replay_state_esn *replay_esn;
	u32 seq_bottom = 0;
	u8 overlap;

	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
		sa_entry->esn_state.trigger = 0;
		return false;
	}

	replay_esn = sa_entry->x->replay_esn;
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;

	overlap = sa_entry->esn_state.overlap;

	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
						    htonl(seq_bottom));

	sa_entry->esn_state.trigger = 1;
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

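/* Translate an xfrm state into mlx5 accel attributes: AES-GCM key material,
 * salt and seq_iv, ESN state, SPI, addresses and mode flags.
 */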
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	/* icv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* esn */
	if (sa_entry->esn_state.trigger) {
		attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
		attrs->esn = sa_entry->esn_state.esn;
		if (sa_entry->esn_state.overlap)
			attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
	}

	/* rx handle */
	attrs->sa_handle = sa_entry->handle;

	/* algo type */
	attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;

	/* action */
	attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
			MLX5_ACCEL_ESP_ACTION_DECRYPT;

	/* flags */
	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
			MLX5_ACCEL_ESP_FLAGS_TUNNEL;

	/* spi */
	attrs->spi = x->id.spi;

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->is_ipv6 = (x->props.family != AF_INET);
}

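/* Reject xfrm states the hardware cannot offload: anything other than
 * plain ESP with AES-GCM-128/256, seqiv IV generation, no encapsulation,
 * and (depending on device caps) ESN and IPv6 support.
 */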
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	if (x->props.aalgo != SADB_AALG_NONE) {
		netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		netdev_info(netdev, "Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
	      MLX5_ACCEL_IPSEC_CAP_ESN)) {
		netdev_info(netdev, "Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		netdev_info(netdev, "Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}
	if (x->props.family == AF_INET6 &&
	    !(mlx5_accel_ipsec_device_caps(priv->mdev) &
	      MLX5_ACCEL_IPSEC_CAP_IPV6)) {
		netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
		return -EINVAL;
	}
	return 0;
}

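/* Flow-steering rules are only installed on devices where the NIC itself
 * handles IPsec (mlx5_is_ipsec_device()); otherwise these are no-ops.
 */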
static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
				  struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (!mlx5_is_ipsec_device(priv->mdev))
		return 0;

	return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
					     sa_entry->ipsec_obj_id,
					     &sa_entry->ipsec_rule);
}

static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
				   struct mlx5e_ipsec_sa_entry *sa_entry)
{
	if (!mlx5_is_ipsec_device(priv->mdev))
		return;

	mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
				      &sa_entry->ipsec_rule);
}

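/* xdo_dev_state_add callback: validate the state, build the accel xfrm and
 * hardware context, install steering rules, and (for RX) register the SA
 * in the SADB so incoming packets can be matched back to it.
 */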
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_priv *priv;
	unsigned int sa_handle;
	int err;

	priv = netdev_priv(netdev);

	err = mlx5e_xfrm_validate_state(x);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		err = -ENOMEM;
		goto out;
	}

	sa_entry->x = x;
	sa_entry->ipsec = priv->ipsec;

	/* check esn */
	mlx5e_ipsec_update_esn_state(sa_entry);

	/* create xfrm */
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
	sa_entry->xfrm =
		mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
					   MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
	if (IS_ERR(sa_entry->xfrm)) {
		err = PTR_ERR(sa_entry->xfrm);
		goto err_sa_entry;
	}

	/* create hw context */
	sa_entry->hw_context =
			mlx5_accel_esp_create_hw_context(priv->mdev,
							 sa_entry->xfrm,
							 &sa_handle);
	if (IS_ERR(sa_entry->hw_context)) {
		err = PTR_ERR(sa_entry->hw_context);
		goto err_xfrm;
	}

	sa_entry->ipsec_obj_id = sa_handle;
	err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
		err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
		if (err)
			goto err_add_rule;
	} else {
		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
	}

	x->xso.offload_handle = (unsigned long)sa_entry;
	goto out;

err_add_rule:
	mlx5e_xfrm_fs_del_rule(priv, sa_entry);
err_hw_ctx:
	mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
err_xfrm:
	mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sa_entry:
	kfree(sa_entry);

out:
	return err;
}

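/* xdo_dev_state_delete callback: unhash RX SAs; resources are released
 * later in mlx5e_xfrm_free_state().
 */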
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

	if (!sa_entry)
		return;

	if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
		mlx5e_ipsec_sadb_rx_del(sa_entry);
}

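/* xdo_dev_state_free callback: tear down steering rules, the hardware
 * context and the accel xfrm, then release the SA entry. The workqueue is
 * flushed first so no pending ESN-update work touches a freed entry.
 */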
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_priv *priv = netdev_priv(x->xso.dev);

	if (!sa_entry)
		return;

	if (sa_entry->hw_context) {
		flush_workqueue(sa_entry->ipsec->wq);
		mlx5e_xfrm_fs_del_rule(priv, sa_entry);
		mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
		mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
	}

	kfree(sa_entry);
}

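/* Allocate per-netdev IPsec state: the RX SADB hash, the handle allocator
 * and an ordered workqueue for deferred hardware updates.
 */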
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = NULL;

	if (!MLX5_IPSEC_DEV(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return 0;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return -ENOMEM;

	hash_init(ipsec->sadb_rx);
	spin_lock_init(&ipsec->sadb_rx_lock);
	ida_init(&ipsec->halloc);
	ipsec->en_priv = priv;
	ipsec->no_trailer = !!(mlx5_accel_ipsec_device_caps(priv->mdev) &
			       MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER);
	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
					    priv->netdev->name);
	if (!ipsec->wq) {
		kfree(ipsec);
		return -ENOMEM;
	}

	priv->ipsec = ipsec;
	mlx5e_accel_ipsec_fs_init(priv);
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return 0;
}

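/* Release everything set up in mlx5e_ipsec_init(). */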
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(priv);
	destroy_workqueue(ipsec->wq);

	ida_destroy(&ipsec->halloc);
	kfree(ipsec);
	priv->ipsec = NULL;
}

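/* xdo_dev_offload_ok callback: IPv4 options and IPv6 extension headers
 * are not supported, so such packets fall back to software processing.
 */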
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

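/* Work item for deferred ESN updates: the attrs are snapshotted at queue
 * time and applied from the ordered workqueue.
 */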
struct mlx5e_ipsec_modify_state_work {
	struct work_struct		work;
	struct mlx5_accel_esp_xfrm_attrs attrs;
	struct mlx5e_ipsec_sa_entry	*sa_entry;
};

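/* Worker that pushes the updated ESN attributes to the hardware. */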
static void _update_xfrm_state(struct work_struct *work)
{
	int ret;
	struct mlx5e_ipsec_modify_state_work *modify_work =
		container_of(work, struct mlx5e_ipsec_modify_state_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = modify_work->sa_entry;

	ret = mlx5_accel_esp_modify_xfrm(sa_entry->xfrm,
					 &modify_work->attrs);
	if (ret)
		netdev_warn(sa_entry->ipsec->en_priv->netdev,
			    "Failed to modify xfrm state, err = %d\n", ret);

	kfree(modify_work);
}

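/* xdo_dev_state_advance_esn callback: runs in atomic context, so the
 * hardware update is deferred to the ordered workqueue.
 */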
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_modify_state_work *modify_work;
	bool need_update;

	if (!sa_entry)
		return;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	modify_work = kzalloc(sizeof(*modify_work), GFP_ATOMIC);
	if (!modify_work)
		return;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
	modify_work->sa_entry = sa_entry;

	INIT_WORK(&modify_work->work, _update_xfrm_state);
	WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work));
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

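/* Advertise ESP offload features on the netdev according to device
 * capabilities: base ESP first, then TX checksum, then GSO where supported.
 */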
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
	    !MLX5_CAP_ETH(mdev, swp)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
		return;
	}

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_LSO) ||
	    !MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	if (mlx5_is_ipsec_device(mdev))
		netdev->gso_partial_features |= NETIF_F_GSO_ESP;

	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}