/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
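/* Length of the explicit IV carried at the front of each ESP payload
 * for AES-GCM (RFC 4106).
 */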
#define GCM_ESP_IV_SIZE     8

static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);

static const struct xfrmdev_ops chcr_xfrmdev_ops = {
	.xdo_dev_state_add      = chcr_xfrm_add_state,
	.xdo_dev_state_delete   = chcr_xfrm_del_state,
	.xdo_dev_state_free     = chcr_xfrm_free_state,
	.xdo_dev_offload_ok     = chcr_ipsec_offload_ok,
};

/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
	struct net_device *netdev = NULL;
	int i;

	for (i = 0; i < lld->nports; i++) {
		netdev = lld->ports[i];
		if (!netdev)
			continue;
		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
		netdev->hw_enc_features |= NETIF_F_HW_ESP;
		netdev->features |= NETIF_F_HW_ESP;
		rtnl_lock();
		netdev_change_features(netdev);
		rtnl_unlock();
	}
}

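/**
 *      chcr_ipsec_setauthsize - record the ICV size of an offloaded SA
 *      @x: xfrm state being offloaded
 *      @sa_entry: driver private SA entry being filled in
 *
 *      Caches the authentication tag size and maps it onto the hardware
 *      HMAC control encoding for 8, 12 or 16 byte ICVs.
 *
 *      Returns the HMAC control value, or -EINVAL for an unsupported
 *      ICV size.
 */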
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
					 struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}

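/**
 *      chcr_ipsec_setkey - build the hardware key context for an SA
 *      @x: xfrm state being offloaded
 *      @sa_entry: driver private SA entry being filled in
 *
 *      Splits the RFC 4106 key material into the AES key and the 4-byte
 *      nonce salt, computes the GHASH subkey H = CIPH(K, 0^128) with a
 *      software AES cipher, and lays out key plus H as the hardware key
 *      context expects them.
 */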
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	struct crypto_cipher *cipher;
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
			      ((DIV_ROUND_UP(keylen, 16)) << 4) +
			      AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the GHASH subkey H = CIPH(K, 0 repeated 16 times);
	 * it goes into the key context right after the AES key.
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		sa_entry->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out1;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			      AEAD_H_SIZE;
out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}

/*
 * chcr_xfrm_add_state
 * Validates that the xfrm state is one the hardware can offload and
 * caches the SA key material; returns 0 on success, a negative errno
 * otherwise.
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN) {
		pr_debug("CHCR: Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("CHCR: Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	/* Cannot fail here: the ICV length was validated above */
	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
	res = chcr_ipsec_setkey(x, sa_entry);
	if (res) {
		kfree(sa_entry);
		goto out;
	}
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}

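/* xdo_dev_state_delete is deliberately a no-op: the xfrm core may still
 * hold references to the state at this point, so the SA entry is only
 * released from chcr_xfrm_free_state() once the state is finally freed.
 */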
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}

static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	/* Offload with IP options is not supported yet */
	if (ip_hdr(skb)->ihl > 5)
		return false;

	return true;
}

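/**
 *      is_eth_imm - can a packet be sent as immediate data?
 *      @skb: the packet
 *      @kctx_len: length of the key context
 *
 *      Returns the header length (WR, crypto request, key context and
 *      packet CPL) if the whole packet fits within MAX_IMM_TX_PKT_LEN
 *      and can be inlined into the work request, else 0.
 */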
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
{
	int hdrlen;

	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

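/**
 *      calc_tx_sec_flits - number of flits needed for a crypto Tx WR
 *      @skb: the packet
 *      @kctx_len: length of the key context
 *
 *      Returns the number of 8-byte flits required to send this packet,
 *      either inlined as immediate data or via a scatter gather list.
 */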
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
					     unsigned int kctx_len)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, kctx_len);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */

	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}

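/**
 *      copy_cpltx_pktxt - write the CPL_TX_PKT_XT command for a packet
 *      @skb: the packet
 *      @dev: the egress net device
 *      @pos: position in the Tx queue to write at
 *
 *      Fills in the packet CPL (interface, PF, VLAN tag, length) with
 *      checksum offload disabled, wrapping to the start of the
 *      descriptor ring when @pos reaches the end.  Returns the position
 *      just past the CPL.
 */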
static inline void *copy_cpltx_pktxt(struct sk_buff *skb,
				     struct net_device *dev,
				     void *pos)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
			       TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	return pos;
}

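/**
 *      copy_key_cpltx_pktxt - write the key context and the packet CPL
 *      @skb: the packet
 *      @dev: the egress net device
 *      @pos: position in the Tx queue to write at
 *      @sa_entry: SA entry holding the key material
 *
 *      Copies the key context header, salt and key into the ring,
 *      splitting the key copy in two when it wraps past the end of the
 *      queue, then appends the CPL_TX_PKT_XT command.  Returns the
 *      position past everything written.
 */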
static inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
					 struct net_device *dev,
					 void *pos,
					 struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos);

	return pos;
}

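/**
 *      chcr_crypto_wreq - build the crypto work request for a packet
 *      @skb: the packet
 *      @dev: the egress net device
 *      @pos: position in the Tx queue to write at
 *      @credits: Tx descriptor credits left after this request
 *      @sa_entry: SA entry for this flow
 *
 *      Constructs the FW_ULPTX_WR header, the ULP TX commands and the
 *      CPL_TX_SEC_PDU that tells the hardware where to insert the IV
 *      and the authentication tag, then appends the key context and the
 *      packet CPL.  Returns the position past the work request.
 */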
static inline void *chcr_crypto_wreq(struct sk_buff *skb,
				     struct net_device *dev,
				     void *pos,
				     int credits,
				     struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int immdatalen = 0;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	unsigned int flits;
	u32 wr_mid;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, kctx_len);

	if (is_eth_imm(skb, kctx_len))
		immdatalen = skb->len;

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 immdatalen);

	/* CPL_SEC_PDU */
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(
				(skb_transport_offset(skb) +
				sizeof(struct ip_esp_hdr) + 1)));

	wr->req.sec_cpl.pldlen = htonl(skb->len);

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
				(skb_transport_offset(skb) + 1),
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr)),
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr) +
				 GCM_ESP_IV_SIZE + 1), 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
					   sizeof(struct ip_esp_hdr) +
					   GCM_ESP_IV_SIZE + 1,
					   sa_entry->authsize,
					   sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, 0, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

/**
 *      flits_to_desc - returns the num of Tx descriptors for the given flits
 *      @n: the number of flits
 *
 *      Returns the number of Tx descriptors needed for the supplied number
 *      of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

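/* Descriptors still free on a Tx queue; one slot is kept in reserve so
 * a completely full ring is not mistaken for an empty one.
 */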
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/*
 *      chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	int qidx, left, credits;
	unsigned int flits = 0, ndesc, kctx_len;
	struct adapter *adap;
	struct sge_eth_txq *q;
	struct port_info *pi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kctx_len = sa_entry->kctx_len;

	if (skb->sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, kctx_len))
		immediate = true;

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}