1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/skbuff.h>
48 #include <linux/rtnetlink.h>
49 #include <linux/highmem.h>
50 #include <linux/scatterlist.h>
51 
52 #include <crypto/aes.h>
53 #include <crypto/algapi.h>
54 #include <crypto/hash.h>
55 #include <crypto/gcm.h>
56 #include <crypto/sha.h>
57 #include <crypto/authenc.h>
58 #include <crypto/ctr.h>
59 #include <crypto/gf128mul.h>
60 #include <crypto/internal/aead.h>
61 #include <crypto/null.h>
62 #include <crypto/internal/skcipher.h>
63 #include <crypto/aead.h>
64 #include <crypto/scatterwalk.h>
65 #include <crypto/internal/hash.h>
66 
67 #include "t4fw_api.h"
68 #include "t4_msg.h"
69 #include "chcr_core.h"
70 #include "chcr_algo.h"
71 #include "chcr_crypto.h"
72 
73 #define IV AES_BLOCK_SIZE
74 
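/*
 * Space (in bytes) taken by a source SGL / destination DSGL with N entries,
 * indexed by entry count; used below when budgeting how much payload still
 * fits in one work request.
 */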
75 static unsigned int sgl_ent_len[] = {
76 	0, 0, 16, 24, 40, 48, 64, 72, 88,
77 	96, 112, 120, 136, 144, 160, 168, 184,
78 	192, 208, 216, 232, 240, 256, 264, 280,
79 	288, 304, 312, 328, 336, 352, 360, 376
80 };
81 
82 static unsigned int dsgl_ent_len[] = {
83 	0, 32, 32, 48, 48, 64, 64, 80, 80,
84 	112, 112, 128, 128, 144, 144, 160, 160,
85 	192, 192, 208, 208, 224, 224, 240, 240,
86 	272, 272, 288, 288, 304, 304, 320, 320
87 };
88 
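/* AES key-schedule round constants (Rcon), kept in the top byte of each word */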
89 static u32 round_constant[11] = {
90 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
91 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
92 	0x1B000000, 0x36000000, 0x6C000000
93 };
94 
95 static int chcr_handle_cipher_resp(struct skcipher_request *req,
96 				   unsigned char *input, int err);
97 
98 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
99 {
100 	return ctx->crypto_ctx->aeadctx;
101 }
102 
103 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
104 {
105 	return ctx->crypto_ctx->ablkctx;
106 }
107 
108 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
109 {
110 	return ctx->crypto_ctx->hmacctx;
111 }
112 
113 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
114 {
115 	return gctx->ctx->gcm;
116 }
117 
118 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
119 {
120 	return gctx->ctx->authenc;
121 }
122 
123 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
124 {
125 	return container_of(ctx->dev, struct uld_ctx, dev);
126 }
127 
128 static inline int is_ofld_imm(const struct sk_buff *skb)
129 {
130 	return (skb->len <= SGE_MAX_WR_LEN);
131 }
132 
133 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
134 {
135 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
136 }
137 
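/*
 * Count how many SG entries are needed to describe @reqlen bytes of @sg,
 * skipping the first @skip bytes, when each entry may cover at most
 * @entlen bytes.
 */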
138 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
139 			 unsigned int entlen,
140 			 unsigned int skip)
141 {
142 	int nents = 0;
143 	unsigned int less;
144 	unsigned int skip_len = 0;
145 
146 	while (sg && skip) {
147 		if (sg_dma_len(sg) <= skip) {
148 			skip -= sg_dma_len(sg);
149 			skip_len = 0;
150 			sg = sg_next(sg);
151 		} else {
152 			skip_len = skip;
153 			skip = 0;
154 		}
155 	}
156 
157 	while (sg && reqlen) {
158 		less = min(reqlen, sg_dma_len(sg) - skip_len);
159 		nents += DIV_ROUND_UP(less, entlen);
160 		reqlen -= less;
161 		skip_len = 0;
162 		sg = sg_next(sg);
163 	}
164 	return nents;
165 }
166 
167 static inline int get_aead_subtype(struct crypto_aead *aead)
168 {
169 	struct aead_alg *alg = crypto_aead_alg(aead);
170 	struct chcr_alg_template *chcr_crypto_alg =
171 		container_of(alg, struct chcr_alg_template, alg.aead);
172 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
173 }
174 
175 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
176 {
177 	u8 temp[SHA512_DIGEST_SIZE];
178 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
179 	int authsize = crypto_aead_authsize(tfm);
180 	struct cpl_fw6_pld *fw6_pld;
181 	int cmp = 0;
182 
183 	fw6_pld = (struct cpl_fw6_pld *)input;
184 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
185 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
186 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
187 	} else {
188 
189 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
190 				authsize, req->assoclen +
191 				req->cryptlen - authsize);
192 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
193 	}
194 	if (cmp)
195 		*err = -EBADMSG;
196 	else
197 		*err = 0;
198 }
199 
200 static int chcr_inc_wrcount(struct chcr_dev *dev)
201 {
202 	if (dev->state == CHCR_DETACH)
203 		return 1;
204 	atomic_inc(&dev->inflight);
205 	return 0;
206 }
207 
208 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
209 {
210 	atomic_dec(&dev->inflight);
211 }
212 
213 static inline int chcr_handle_aead_resp(struct aead_request *req,
214 					 unsigned char *input,
215 					 int err)
216 {
217 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
218 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
219 	struct chcr_dev *dev = a_ctx(tfm)->dev;
220 
221 	chcr_aead_common_exit(req);
222 	if (reqctx->verify == VERIFY_SW) {
223 		chcr_verify_tag(req, input, &err);
224 		reqctx->verify = VERIFY_HW;
225 	}
226 	chcr_dec_wrcount(dev);
227 	req->base.complete(&req->base, err);
228 
229 	return err;
230 }
231 
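/*
 * Derive the key material used for AES decryption: run the standard AES
 * key expansion on @key and write the last Nk words of the schedule, in
 * reverse order, into @dec_key.
 */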
232 static void get_aes_decrypt_key(unsigned char *dec_key,
233 				       const unsigned char *key,
234 				       unsigned int keylength)
235 {
236 	u32 temp;
237 	u32 w_ring[MAX_NK];
238 	int i, j, k;
239 	u8  nr, nk;
240 
241 	switch (keylength) {
242 	case AES_KEYLENGTH_128BIT:
243 		nk = KEYLENGTH_4BYTES;
244 		nr = NUMBER_OF_ROUNDS_10;
245 		break;
246 	case AES_KEYLENGTH_192BIT:
247 		nk = KEYLENGTH_6BYTES;
248 		nr = NUMBER_OF_ROUNDS_12;
249 		break;
250 	case AES_KEYLENGTH_256BIT:
251 		nk = KEYLENGTH_8BYTES;
252 		nr = NUMBER_OF_ROUNDS_14;
253 		break;
254 	default:
255 		return;
256 	}
257 	for (i = 0; i < nk; i++)
258 		w_ring[i] = get_unaligned_be32(&key[i * 4]);
259 
260 	i = 0;
261 	temp = w_ring[nk - 1];
262 	while (i + nk < (nr + 1) * 4) {
263 		if (!(i % nk)) {
264 			/* RotWord(temp) */
265 			temp = (temp << 8) | (temp >> 24);
266 			temp = aes_ks_subword(temp);
267 			temp ^= round_constant[i / nk];
268 		} else if (nk == 8 && (i % 4 == 0)) {
269 			temp = aes_ks_subword(temp);
270 		}
271 		w_ring[i % nk] ^= temp;
272 		temp = w_ring[i % nk];
273 		i++;
274 	}
275 	i--;
276 	for (k = 0, j = i % nk; k < nk; k++) {
277 		put_unaligned_be32(w_ring[j], &dec_key[k * 4]);
278 		j--;
279 		if (j < 0)
280 			j += nk;
281 	}
282 }
283 
284 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
285 {
286 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
287 
288 	switch (ds) {
289 	case SHA1_DIGEST_SIZE:
290 		base_hash = crypto_alloc_shash("sha1", 0, 0);
291 		break;
292 	case SHA224_DIGEST_SIZE:
293 		base_hash = crypto_alloc_shash("sha224", 0, 0);
294 		break;
295 	case SHA256_DIGEST_SIZE:
296 		base_hash = crypto_alloc_shash("sha256", 0, 0);
297 		break;
298 	case SHA384_DIGEST_SIZE:
299 		base_hash = crypto_alloc_shash("sha384", 0, 0);
300 		break;
301 	case SHA512_DIGEST_SIZE:
302 		base_hash = crypto_alloc_shash("sha512", 0, 0);
303 		break;
304 	}
305 
306 	return base_hash;
307 }
308 
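/*
 * Hash a single block (@iopad) with the software shash and export the
 * intermediate state into @result_hash; used when precomputing HMAC
 * ipad/opad partial hashes.
 */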
309 static int chcr_compute_partial_hash(struct shash_desc *desc,
310 				     char *iopad, char *result_hash,
311 				     int digest_size)
312 {
313 	struct sha1_state sha1_st;
314 	struct sha256_state sha256_st;
315 	struct sha512_state sha512_st;
316 	int error;
317 
318 	if (digest_size == SHA1_DIGEST_SIZE) {
319 		error = crypto_shash_init(desc) ?:
320 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
321 			crypto_shash_export(desc, (void *)&sha1_st);
322 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
323 	} else if (digest_size == SHA224_DIGEST_SIZE) {
324 		error = crypto_shash_init(desc) ?:
325 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
326 			crypto_shash_export(desc, (void *)&sha256_st);
327 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
328 
329 	} else if (digest_size == SHA256_DIGEST_SIZE) {
330 		error = crypto_shash_init(desc) ?:
331 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
332 			crypto_shash_export(desc, (void *)&sha256_st);
333 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
334 
335 	} else if (digest_size == SHA384_DIGEST_SIZE) {
336 		error = crypto_shash_init(desc) ?:
337 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
338 			crypto_shash_export(desc, (void *)&sha512_st);
339 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
340 
341 	} else if (digest_size == SHA512_DIGEST_SIZE) {
342 		error = crypto_shash_init(desc) ?:
343 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
344 			crypto_shash_export(desc, (void *)&sha512_st);
345 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
346 	} else {
347 		error = -EINVAL;
348 		pr_err("Unknown digest size %d\n", digest_size);
349 	}
350 	return error;
351 }
352 
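/*
 * Convert the exported hash state words in @buf to big-endian (64-bit words
 * for SHA-384/512, 32-bit words otherwise).
 */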
353 static void chcr_change_order(char *buf, int ds)
354 {
355 	int i;
356 
357 	if (ds == SHA512_DIGEST_SIZE) {
358 		for (i = 0; i < (ds / sizeof(u64)); i++)
359 			*((__be64 *)buf + i) =
360 				cpu_to_be64(*((u64 *)buf + i));
361 	} else {
362 		for (i = 0; i < (ds / sizeof(u32)); i++)
363 			*((__be32 *)buf + i) =
364 				cpu_to_be32(*((u32 *)buf + i));
365 	}
366 }
367 
368 static inline int is_hmac(struct crypto_tfm *tfm)
369 {
370 	struct crypto_alg *alg = tfm->__crt_alg;
371 	struct chcr_alg_template *chcr_crypto_alg =
372 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
373 			     alg.hash);
374 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
375 		return 1;
376 	return 0;
377 }
378 
379 static inline void dsgl_walk_init(struct dsgl_walk *walk,
380 				   struct cpl_rx_phys_dsgl *dsgl)
381 {
382 	walk->dsgl = dsgl;
383 	walk->nents = 0;
384 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
385 }
386 
387 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
388 				 int pci_chan_id)
389 {
390 	struct cpl_rx_phys_dsgl *phys_cpl;
391 
392 	phys_cpl = walk->dsgl;
393 
394 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
395 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
396 	phys_cpl->pcirlxorder_to_noofsgentr =
397 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
398 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
400 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
401 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
402 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
403 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
404 	phys_cpl->rss_hdr_int.qid = htons(qid);
405 	phys_cpl->rss_hdr_int.hash_val = 0;
406 	phys_cpl->rss_hdr_int.channel = pci_chan_id;
407 }
408 
409 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
410 					size_t size,
411 					dma_addr_t addr)
412 {
413 	int j;
414 
415 	if (!size)
416 		return;
417 	j = walk->nents;
418 	walk->to->len[j % 8] = htons(size);
419 	walk->to->addr[j % 8] = cpu_to_be64(addr);
420 	j++;
421 	if ((j % 8) == 0)
422 		walk->to++;
423 	walk->nents = j;
424 }
425 
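/*
 * Append @slen bytes of @sg (after skipping @skip bytes) to the destination
 * DSGL, splitting anything larger than CHCR_DST_SG_SIZE across multiple
 * addr/len pairs; eight pairs are packed per phys_sge_pairs block.
 */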
426 static void dsgl_walk_add_sg(struct dsgl_walk *walk,
427 			   struct scatterlist *sg,
428 			      unsigned int slen,
429 			      unsigned int skip)
430 {
431 	int skip_len = 0;
432 	unsigned int left_size = slen, len = 0;
433 	unsigned int j = walk->nents;
434 	int offset, ent_len;
435 
436 	if (!slen)
437 		return;
438 	while (sg && skip) {
439 		if (sg_dma_len(sg) <= skip) {
440 			skip -= sg_dma_len(sg);
441 			skip_len = 0;
442 			sg = sg_next(sg);
443 		} else {
444 			skip_len = skip;
445 			skip = 0;
446 		}
447 	}
448 
449 	while (left_size && sg) {
450 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
451 		offset = 0;
452 		while (len) {
453 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
454 			walk->to->len[j % 8] = htons(ent_len);
455 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
456 						      offset + skip_len);
457 			offset += ent_len;
458 			len -= ent_len;
459 			j++;
460 			if ((j % 8) == 0)
461 				walk->to++;
462 		}
463 		walk->last_sg = sg;
464 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
465 					  skip_len) + skip_len;
466 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
467 		skip_len = 0;
468 		sg = sg_next(sg);
469 	}
470 	walk->nents = j;
471 }
472 
473 static inline void ulptx_walk_init(struct ulptx_walk *walk,
474 				   struct ulptx_sgl *ulp)
475 {
476 	walk->sgl = ulp;
477 	walk->nents = 0;
478 	walk->pair_idx = 0;
479 	walk->pair = ulp->sge;
480 	walk->last_sg = NULL;
481 	walk->last_sg_len = 0;
482 }
483 
484 static inline void ulptx_walk_end(struct ulptx_walk *walk)
485 {
486 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
487 			      ULPTX_NSGE_V(walk->nents));
488 }
489 
490 
491 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
492 					size_t size,
493 					dma_addr_t addr)
494 {
495 	if (!size)
496 		return;
497 
498 	if (walk->nents == 0) {
499 		walk->sgl->len0 = cpu_to_be32(size);
500 		walk->sgl->addr0 = cpu_to_be64(addr);
501 	} else {
502 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
503 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
504 		walk->pair_idx = !walk->pair_idx;
505 		if (!walk->pair_idx)
506 			walk->pair++;
507 	}
508 	walk->nents++;
509 }
510 
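/*
 * Append @len bytes of @sg (after skipping @skip bytes) to the ULPTX source
 * SGL: the first entry goes into len0/addr0, the rest into addr/len pairs,
 * each entry capped at CHCR_SRC_SG_SIZE.
 */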
511 static void ulptx_walk_add_sg(struct ulptx_walk *walk,
512 					struct scatterlist *sg,
513 			       unsigned int len,
514 			       unsigned int skip)
515 {
516 	int small;
517 	int skip_len = 0;
518 	unsigned int sgmin;
519 
520 	if (!len)
521 		return;
522 	while (sg && skip) {
523 		if (sg_dma_len(sg) <= skip) {
524 			skip -= sg_dma_len(sg);
525 			skip_len = 0;
526 			sg = sg_next(sg);
527 		} else {
528 			skip_len = skip;
529 			skip = 0;
530 		}
531 	}
532 	WARN(!sg, "SG should not be null here\n");
533 	if (sg && (walk->nents == 0)) {
534 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
535 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
536 		walk->sgl->len0 = cpu_to_be32(sgmin);
537 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
538 		walk->nents++;
539 		len -= sgmin;
540 		walk->last_sg = sg;
541 		walk->last_sg_len = sgmin + skip_len;
542 		skip_len += sgmin;
543 		if (sg_dma_len(sg) == skip_len) {
544 			sg = sg_next(sg);
545 			skip_len = 0;
546 		}
547 	}
548 
549 	while (sg && len) {
550 		small = min(sg_dma_len(sg) - skip_len, len);
551 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
552 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
553 		walk->pair->addr[walk->pair_idx] =
554 			cpu_to_be64(sg_dma_address(sg) + skip_len);
555 		walk->pair_idx = !walk->pair_idx;
556 		walk->nents++;
557 		if (!walk->pair_idx)
558 			walk->pair++;
559 		len -= sgmin;
560 		skip_len += sgmin;
561 		walk->last_sg = sg;
562 		walk->last_sg_len = skip_len;
563 		if (sg_dma_len(sg) == skip_len) {
564 			sg = sg_next(sg);
565 			skip_len = 0;
566 		}
567 	}
568 }
569 
570 static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm)
571 {
572 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
573 	struct chcr_alg_template *chcr_crypto_alg =
574 		container_of(alg, struct chcr_alg_template, alg.skcipher);
575 
576 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
577 }
578 
579 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
580 {
581 	struct adapter *adap = netdev2adap(dev);
582 	struct sge_uld_txq_info *txq_info =
583 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
584 	struct sge_uld_txq *txq;
585 	int ret = 0;
586 
587 	local_bh_disable();
588 	txq = &txq_info->uldtxq[idx];
589 	spin_lock(&txq->sendq.lock);
590 	if (txq->full)
591 		ret = -1;
592 	spin_unlock(&txq->sendq.lock);
593 	local_bh_enable();
594 	return ret;
595 }
596 
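/*
 * Copy the reverse-round (decrypt) key into the key context: for CBC the
 * whole rrkey; otherwise the second half of the raw key followed by half of
 * the reverse-round key.
 */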
597 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
598 			       struct _key_ctx *key_ctx)
599 {
600 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
601 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
602 	} else {
603 		memcpy(key_ctx->key,
604 		       ablkctx->key + (ablkctx->enckey_len >> 1),
605 		       ablkctx->enckey_len >> 1);
606 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
607 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
608 	}
609 	return 0;
610 }
611 
612 static int chcr_hash_ent_in_wr(struct scatterlist *src,
613 			     unsigned int minsg,
614 			     unsigned int space,
615 			     unsigned int srcskip)
616 {
617 	int srclen = 0;
618 	int srcsg = minsg;
619 	int soffset = 0, sless;
620 
621 	if (sg_dma_len(src) == srcskip) {
622 		src = sg_next(src);
623 		srcskip = 0;
624 	}
625 	while (src && space > (sgl_ent_len[srcsg + 1])) {
626 		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
627 							CHCR_SRC_SG_SIZE);
628 		srclen += sless;
629 		soffset += sless;
630 		srcsg++;
631 		if (sg_dma_len(src) == (soffset + srcskip)) {
632 			src = sg_next(src);
633 			soffset = 0;
634 			srcskip = 0;
635 		}
636 	}
637 	return srclen;
638 }
639 
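/*
 * Work out how many payload bytes of @src/@dst fit in the remaining work
 * request @space, walking both lists in step and charging each new entry
 * against the sgl_ent_len/dsgl_ent_len tables; returns the smaller of the
 * two byte counts.
 */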
640 static int chcr_sg_ent_in_wr(struct scatterlist *src,
641 			     struct scatterlist *dst,
642 			     unsigned int minsg,
643 			     unsigned int space,
644 			     unsigned int srcskip,
645 			     unsigned int dstskip)
646 {
647 	int srclen = 0, dstlen = 0;
648 	int srcsg = minsg, dstsg = minsg;
649 	int offset = 0, soffset = 0, less, sless = 0;
650 
651 	if (sg_dma_len(src) == srcskip) {
652 		src = sg_next(src);
653 		srcskip = 0;
654 	}
655 	if (sg_dma_len(dst) == dstskip) {
656 		dst = sg_next(dst);
657 		dstskip = 0;
658 	}
659 
660 	while (src && dst &&
661 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
662 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
663 				CHCR_SRC_SG_SIZE);
664 		srclen += sless;
665 		srcsg++;
666 		offset = 0;
667 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
668 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
669 			if (srclen <= dstlen)
670 				break;
671 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
672 				     dstskip, CHCR_DST_SG_SIZE);
673 			dstlen += less;
674 			offset += less;
675 			if ((offset + dstskip) == sg_dma_len(dst)) {
676 				dst = sg_next(dst);
677 				offset = 0;
678 			}
679 			dstsg++;
680 			dstskip = 0;
681 		}
682 		soffset += sless;
683 		if ((soffset + srcskip) == sg_dma_len(src)) {
684 			src = sg_next(src);
685 			srcskip = 0;
686 			soffset = 0;
687 		}
688 
689 	}
690 	return min(srclen, dstlen);
691 }
692 
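/*
 * Hand the request to the software skcipher fallback, reusing the caller's
 * completion callback and request flags; @op_type selects decrypt (non-zero)
 * or encrypt.
 */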
693 static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
694 				struct skcipher_request *req,
695 				u8 *iv,
696 				unsigned short op_type)
697 {
698 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
699 	int err;
700 
701 	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
702 	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
703 				      req->base.complete, req->base.data);
704 	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
705 				   req->cryptlen, iv);
706 
707 	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
708 			crypto_skcipher_encrypt(&reqctx->fallback_req);
709 
710 	return err;
711 
712 }
713 
714 static inline int get_qidxs(struct crypto_async_request *req,
715 			    unsigned int *txqidx, unsigned int *rxqidx)
716 {
717 	struct crypto_tfm *tfm = req->tfm;
718 	int ret = 0;
719 
720 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
721 	case CRYPTO_ALG_TYPE_AEAD:
722 	{
723 		struct aead_request *aead_req =
724 			container_of(req, struct aead_request, base);
725 		struct chcr_aead_reqctx *reqctx = aead_request_ctx(aead_req);
726 		*txqidx = reqctx->txqidx;
727 		*rxqidx = reqctx->rxqidx;
728 		break;
729 	}
730 	case CRYPTO_ALG_TYPE_SKCIPHER:
731 	{
732 		struct skcipher_request *sk_req =
733 			container_of(req, struct skcipher_request, base);
734 		struct chcr_skcipher_req_ctx *reqctx =
735 			skcipher_request_ctx(sk_req);
736 		*txqidx = reqctx->txqidx;
737 		*rxqidx = reqctx->rxqidx;
738 		break;
739 	}
740 	case CRYPTO_ALG_TYPE_AHASH:
741 	{
742 		struct ahash_request *ahash_req =
743 			container_of(req, struct ahash_request, base);
744 		struct chcr_ahash_req_ctx *reqctx =
745 			ahash_request_ctx(ahash_req);
746 		*txqidx = reqctx->txqidx;
747 		*rxqidx = reqctx->rxqidx;
748 		break;
749 	}
750 	default:
751 		ret = -EINVAL;
752 		/* should never get here */
753 		BUG();
754 		break;
755 	}
756 	return ret;
757 }
758 
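/*
 * Fill the FW_CRYPTO_LOOKASIDE_WR header and ULPTX fields common to the
 * cipher and hash work-request builders: rx/tx queue and channel selection,
 * lengths in 16-byte units, a cookie pointing back at @req, and whether the
 * payload is carried as immediate data or as an SGL.
 */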
759 static inline void create_wreq(struct chcr_context *ctx,
760 			       struct chcr_wr *chcr_req,
761 			       struct crypto_async_request *req,
762 			       unsigned int imm,
763 			       int hash_sz,
764 			       unsigned int len16,
765 			       unsigned int sc_len,
766 			       unsigned int lcb)
767 {
768 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
769 	unsigned int tx_channel_id, rx_channel_id;
770 	unsigned int txqidx = 0, rxqidx = 0;
771 	unsigned int qid, fid, portno;
772 
773 	get_qidxs(req, &txqidx, &rxqidx);
774 	qid = u_ctx->lldi.rxq_ids[rxqidx];
775 	fid = u_ctx->lldi.rxq_ids[0];
776 	portno = rxqidx / ctx->rxq_perchan;
777 	tx_channel_id = txqidx / ctx->txq_perchan;
778 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
779 
780 
781 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
782 	chcr_req->wreq.pld_size_hash_size =
783 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
784 	chcr_req->wreq.len16_pkd =
785 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
786 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
787 	chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid,
788 							    !!lcb, txqidx);
789 
790 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid);
791 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
792 				((sizeof(chcr_req->wreq)) >> 4)));
793 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
794 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
795 					   sizeof(chcr_req->key_ctx) + sc_len);
796 }
797 
798 /**
799  *	create_cipher_wr - form the WR for cipher operations
800  *	@wrparam: cipher work request parameters, carrying the skcipher
801  *	request, the ingress qid where the response of this WR should be
802  *	received, and the number of bytes to process in this WR (the
803  *	operation type, encryption or decryption, comes from the request ctx)
804  */
805 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
806 {
807 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
808 	struct chcr_context *ctx = c_ctx(tfm);
809 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
810 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
811 	struct sk_buff *skb = NULL;
812 	struct chcr_wr *chcr_req;
813 	struct cpl_rx_phys_dsgl *phys_cpl;
814 	struct ulptx_sgl *ulptx;
815 	struct chcr_skcipher_req_ctx *reqctx =
816 		skcipher_request_ctx(wrparam->req);
817 	unsigned int temp = 0, transhdr_len, dst_size;
818 	int error;
819 	int nents;
820 	unsigned int kctx_len;
821 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
822 			GFP_KERNEL : GFP_ATOMIC;
823 	struct adapter *adap = padap(ctx->dev);
824 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
825 
826 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
827 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
828 			      reqctx->dst_ofst);
829 	dst_size = get_space_for_phys_dsgl(nents);
830 	kctx_len = roundup(ablkctx->enckey_len, 16);
831 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
832 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
833 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
834 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
835 				     (sgl_len(nents) * 8);
836 	transhdr_len += temp;
837 	transhdr_len = roundup(transhdr_len, 16);
838 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
839 	if (!skb) {
840 		error = -ENOMEM;
841 		goto err;
842 	}
843 	chcr_req = __skb_put_zero(skb, transhdr_len);
844 	chcr_req->sec_cpl.op_ivinsrtofst =
845 			FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
846 
847 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
848 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
849 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
850 
851 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
852 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
853 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
854 							 ablkctx->ciph_mode,
855 							 0, 0, IV >> 1);
856 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
857 							  0, 1, dst_size);
858 
859 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
860 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
861 	    (!(get_cryptoalg_subtype(tfm) ==
862 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
863 	    (!(get_cryptoalg_subtype(tfm) ==
864 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
865 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
866 	} else {
867 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
868 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
869 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
870 			       ablkctx->enckey_len);
871 		} else {
872 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
873 			       (ablkctx->enckey_len >> 1),
874 			       ablkctx->enckey_len >> 1);
875 			memcpy(chcr_req->key_ctx.key +
876 			       (ablkctx->enckey_len >> 1),
877 			       ablkctx->key,
878 			       ablkctx->enckey_len >> 1);
879 		}
880 	}
881 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
882 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
883 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
884 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
885 
886 	atomic_inc(&adap->chcr_stats.cipher_rqst);
887 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
888 		+ (reqctx->imm ? (wrparam->bytes) : 0);
889 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
890 		    transhdr_len, temp,
891 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
892 	reqctx->skb = skb;
893 
894 	if (reqctx->op && (ablkctx->ciph_mode ==
895 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
896 		sg_pcopy_to_buffer(wrparam->req->src,
897 			sg_nents(wrparam->req->src), wrparam->req->iv, 16,
898 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
899 
900 	return skb;
901 err:
902 	return ERR_PTR(error);
903 }
904 
905 static inline int chcr_keyctx_ck_size(unsigned int keylen)
906 {
907 	int ck_size = 0;
908 
909 	if (keylen == AES_KEYSIZE_128)
910 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
911 	else if (keylen == AES_KEYSIZE_192)
912 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
913 	else if (keylen == AES_KEYSIZE_256)
914 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
915 	else
916 		ck_size = 0;
917 
918 	return ck_size;
919 }
920 static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
921 				       const u8 *key,
922 				       unsigned int keylen)
923 {
924 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
925 
926 	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
927 				CRYPTO_TFM_REQ_MASK);
928 	crypto_skcipher_set_flags(ablkctx->sw_cipher,
929 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
930 	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
931 }
932 
933 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
934 			       const u8 *key,
935 			       unsigned int keylen)
936 {
937 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
938 	unsigned int ck_size, context_size;
939 	u16 alignment = 0;
940 	int err;
941 
942 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
943 	if (err)
944 		goto badkey_err;
945 
946 	ck_size = chcr_keyctx_ck_size(keylen);
947 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
948 	memcpy(ablkctx->key, key, keylen);
949 	ablkctx->enckey_len = keylen;
950 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
951 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
952 			keylen + alignment) >> 4;
953 
954 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
955 						0, 0, context_size);
956 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
957 	return 0;
958 badkey_err:
959 	ablkctx->enckey_len = 0;
960 
961 	return err;
962 }
963 
964 static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher,
965 				   const u8 *key,
966 				   unsigned int keylen)
967 {
968 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
969 	unsigned int ck_size, context_size;
970 	u16 alignment = 0;
971 	int err;
972 
973 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
974 	if (err)
975 		goto badkey_err;
976 	ck_size = chcr_keyctx_ck_size(keylen);
977 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
978 	memcpy(ablkctx->key, key, keylen);
979 	ablkctx->enckey_len = keylen;
980 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
981 			keylen + alignment) >> 4;
982 
983 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
984 						0, 0, context_size);
985 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
986 
987 	return 0;
988 badkey_err:
989 	ablkctx->enckey_len = 0;
990 
991 	return err;
992 }
993 
994 static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher,
995 				   const u8 *key,
996 				   unsigned int keylen)
997 {
998 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
999 	unsigned int ck_size, context_size;
1000 	u16 alignment = 0;
1001 	int err;
1002 
1003 	if (keylen < CTR_RFC3686_NONCE_SIZE)
1004 		return -EINVAL;
1005 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
1006 	       CTR_RFC3686_NONCE_SIZE);
1007 
1008 	keylen -= CTR_RFC3686_NONCE_SIZE;
1009 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
1010 	if (err)
1011 		goto badkey_err;
1012 
1013 	ck_size = chcr_keyctx_ck_size(keylen);
1014 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
1015 	memcpy(ablkctx->key, key, keylen);
1016 	ablkctx->enckey_len = keylen;
1017 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
1018 			keylen + alignment) >> 4;
1019 
1020 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
1021 						0, 0, context_size);
1022 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
1023 
1024 	return 0;
1025 badkey_err:
1026 	ablkctx->enckey_len = 0;
1027 
1028 	return err;
1029 }
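/* Add @add to the 128-bit big-endian counter in @srciv, writing the result
 * (with carry propagation) into @dstiv.
 */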
1030 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
1031 {
1032 	unsigned int size = AES_BLOCK_SIZE;
1033 	__be32 *b = (__be32 *)(dstiv + size);
1034 	u32 c, prev;
1035 
1036 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
1037 	for (; size >= 4; size -= 4) {
1038 		prev = be32_to_cpu(*--b);
1039 		c = prev + add;
1040 		*b = cpu_to_be32(c);
1041 		if (prev < c)
1042 			break;
1043 		add = 1;
1044 	}
1045 
1046 }
1047 
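/*
 * Clamp @bytes so that the low 32-bit word of the CTR counter in @iv does
 * not wrap within this request; any remainder is processed in a subsequent
 * work request with an adjusted counter.
 */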
1048 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1049 {
1050 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1051 	u64 c;
1052 	u32 temp = be32_to_cpu(*--b);
1053 
1054 	temp = ~temp;
1055 	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1056 	if ((bytes / AES_BLOCK_SIZE) >= c)
1057 		bytes = c * AES_BLOCK_SIZE;
1058 	return bytes;
1059 }
1060 
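/*
 * Recompute the XTS tweak after last_req_len bytes have been processed:
 * encrypt the original IV with the tweak half of the key, advance it by one
 * GF(2^128) multiplication by x per AES block and, for a non-final update,
 * decrypt the result back again with the same key.
 */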
1061 static int chcr_update_tweak(struct skcipher_request *req, u8 *iv,
1062 			     u32 isfinal)
1063 {
1064 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1065 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1066 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1067 	struct crypto_aes_ctx aes;
1068 	int ret, i;
1069 	u8 *key;
1070 	unsigned int keylen;
1071 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1072 	int round8 = round / 8;
1073 
1074 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1075 
1076 	keylen = ablkctx->enckey_len / 2;
1077 	key = ablkctx->key + keylen;
1078 	/* For a 192 bit key remove the padded zeroes which were
1079 	 * added in chcr_xts_setkey
1080 	 */
1081 	if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr))
1082 			== CHCR_KEYCTX_CIPHER_KEY_SIZE_192)
1083 		ret = aes_expandkey(&aes, key, keylen - 8);
1084 	else
1085 		ret = aes_expandkey(&aes, key, keylen);
1086 	if (ret)
1087 		return ret;
1088 	aes_encrypt(&aes, iv, iv);
1089 	for (i = 0; i < round8; i++)
1090 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1091 
1092 	for (i = 0; i < (round % 8); i++)
1093 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1094 
1095 	if (!isfinal)
1096 		aes_decrypt(&aes, iv, iv);
1097 
1098 	memzero_explicit(&aes, sizeof(aes));
1099 	return 0;
1100 }
1101 
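/*
 * Derive the IV for the next chunk of a multi-WR request: advance the CTR
 * counter by the number of blocks already processed, rewrite the RFC3686
 * counter word, update the XTS tweak, or, for CBC, use the last ciphertext
 * block saved earlier or returned in the completion.
 */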
1102 static int chcr_update_cipher_iv(struct skcipher_request *req,
1103 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1104 {
1105 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1106 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1107 	int subtype = get_cryptoalg_subtype(tfm);
1108 	int ret = 0;
1109 
1110 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1111 		ctr_add_iv(iv, req->iv, (reqctx->processed /
1112 			   AES_BLOCK_SIZE));
1113 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1114 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1115 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1116 						AES_BLOCK_SIZE) + 1);
1117 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1118 		ret = chcr_update_tweak(req, iv, 0);
1119 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1120 		if (reqctx->op)
1121 			/*Updated before sending last WR*/
1122 			memcpy(iv, req->iv, AES_BLOCK_SIZE);
1123 		else
1124 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1125 	}
1126 
1127 	return ret;
1128 
1129 }
1130 
1131 /* A separate function is needed for the final IV because in RFC3686 the
1132  * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1133  * remains constant for subsequent update requests.
1134  */
1135 
1136 static int chcr_final_cipher_iv(struct skcipher_request *req,
1137 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1138 {
1139 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1140 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1141 	int subtype = get_cryptoalg_subtype(tfm);
1142 	int ret = 0;
1143 
1144 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1145 		ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed,
1146 						       AES_BLOCK_SIZE));
1147 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) {
1148 		if (!reqctx->partial_req)
1149 			memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1150 		else
1151 			ret = chcr_update_tweak(req, iv, 1);
1152 	}
1153 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1154 		/*Already updated for Decrypt*/
1155 		if (!reqctx->op)
1156 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1157 
1158 	}
1159 	return ret;
1160 
1161 }
1162 
1163 static int chcr_handle_cipher_resp(struct skcipher_request *req,
1164 				   unsigned char *input, int err)
1165 {
1166 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1167 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1168 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1169 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1170 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1171 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1172 	struct chcr_context *ctx = c_ctx(tfm);
1173 	struct adapter *adap = padap(ctx->dev);
1174 	struct cipher_wr_param wrparam;
1175 	struct sk_buff *skb;
1176 	int bytes;
1177 
1178 	if (err)
1179 		goto unmap;
1180 	if (req->cryptlen == reqctx->processed) {
1181 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1182 				      req);
1183 		err = chcr_final_cipher_iv(req, fw6_pld, req->iv);
1184 		goto complete;
1185 	}
1186 
1187 	if (!reqctx->imm) {
1188 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1189 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1190 					  reqctx->src_ofst, reqctx->dst_ofst);
1191 		if ((bytes + reqctx->processed) >= req->cryptlen)
1192 			bytes  = req->cryptlen - reqctx->processed;
1193 		else
1194 			bytes = rounddown(bytes, 16);
1195 	} else {
1196 		/* CTR mode counter overflow */
1197 		bytes  = req->cryptlen - reqctx->processed;
1198 	}
1199 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1200 	if (err)
1201 		goto unmap;
1202 
1203 	if (unlikely(bytes == 0)) {
1204 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1205 				      req);
1206 		memcpy(req->iv, reqctx->init_iv, IV);
1207 		atomic_inc(&adap->chcr_stats.fallback);
1208 		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
1209 					   reqctx->op);
1210 		goto complete;
1211 	}
1212 
1213 	if (get_cryptoalg_subtype(tfm) ==
1214 	    CRYPTO_ALG_SUB_TYPE_CTR)
1215 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1216 	wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx];
1217 	wrparam.req = req;
1218 	wrparam.bytes = bytes;
1219 	skb = create_cipher_wr(&wrparam);
1220 	if (IS_ERR(skb)) {
1221 		pr_err("%s : Failed to form WR. No memory\n", __func__);
1222 		err = PTR_ERR(skb);
1223 		goto unmap;
1224 	}
1225 	skb->dev = u_ctx->lldi.ports[0];
1226 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1227 	chcr_send_wr(skb);
1228 	reqctx->last_req_len = bytes;
1229 	reqctx->processed += bytes;
1230 	if (get_cryptoalg_subtype(tfm) ==
1231 		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1232 			CRYPTO_TFM_REQ_MAY_SLEEP) {
1233 		complete(&ctx->cbc_aes_aio_done);
1234 	}
1235 	return 0;
1236 unmap:
1237 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1238 complete:
1239 	if (get_cryptoalg_subtype(tfm) ==
1240 		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1241 			CRYPTO_TFM_REQ_MAY_SLEEP) {
1242 		complete(&ctx->cbc_aes_aio_done);
1243 	}
1244 	chcr_dec_wrcount(dev);
1245 	req->base.complete(&req->base, err);
1246 	return err;
1247 }
1248 
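/*
 * Validate and start a cipher request: map it for DMA, decide between
 * immediate data and an SGL, build the per-request IV (including the RFC3686
 * nonce/IV/counter layout) and create the first cipher WR. Requests the
 * hardware cannot handle (e.g. zero length) are punted to the software
 * fallback.
 */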
1249 static int process_cipher(struct skcipher_request *req,
1250 				  unsigned short qid,
1251 				  struct sk_buff **skb,
1252 				  unsigned short op_type)
1253 {
1254 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1255 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1256 	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
1257 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1258 	struct adapter *adap = padap(c_ctx(tfm)->dev);
1259 	struct	cipher_wr_param wrparam;
1260 	int bytes, err = -EINVAL;
1261 	int subtype;
1262 
1263 	reqctx->processed = 0;
1264 	reqctx->partial_req = 0;
1265 	if (!req->iv)
1266 		goto error;
1267 	subtype = get_cryptoalg_subtype(tfm);
1268 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1269 	    (req->cryptlen == 0) ||
1270 	    (req->cryptlen % crypto_skcipher_blocksize(tfm))) {
1271 		if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS)
1272 			goto fallback;
1273 		else if (req->cryptlen % crypto_skcipher_blocksize(tfm) &&
1274 			 subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1275 			goto fallback;
1276 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1277 		       ablkctx->enckey_len, req->cryptlen, ivsize);
1278 		goto error;
1279 	}
1280 
1281 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1282 	if (err)
1283 		goto error;
1284 	if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1285 					    AES_MIN_KEY_SIZE +
1286 					    sizeof(struct cpl_rx_phys_dsgl) +
1287 					/*Min dsgl size*/
1288 					    32))) {
1289 		/* Can be sent as Imm*/
1290 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1291 
1292 		dnents = sg_nents_xlen(req->dst, req->cryptlen,
1293 				       CHCR_DST_SG_SIZE, 0);
1294 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1295 		kctx_len = roundup(ablkctx->enckey_len, 16);
1296 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1297 		reqctx->imm = (transhdr_len + IV + req->cryptlen) <=
1298 			SGE_MAX_WR_LEN;
1299 		bytes = IV + req->cryptlen;
1300 
1301 	} else {
1302 		reqctx->imm = 0;
1303 	}
1304 
1305 	if (!reqctx->imm) {
1306 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1307 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1308 					  0, 0);
1309 		if ((bytes + reqctx->processed) >= req->cryptlen)
1310 			bytes  = req->cryptlen - reqctx->processed;
1311 		else
1312 			bytes = rounddown(bytes, 16);
1313 	} else {
1314 		bytes = req->cryptlen;
1315 	}
1316 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) {
1317 		bytes = adjust_ctr_overflow(req->iv, bytes);
1318 	}
1319 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1320 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1321 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1322 				CTR_RFC3686_IV_SIZE);
1323 
1324 		/* initialize counter portion of counter block */
1325 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1326 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1327 		memcpy(reqctx->init_iv, reqctx->iv, IV);
1328 
1329 	} else {
1330 
1331 		memcpy(reqctx->iv, req->iv, IV);
1332 		memcpy(reqctx->init_iv, req->iv, IV);
1333 	}
1334 	if (unlikely(bytes == 0)) {
1335 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1336 				      req);
1337 fallback:       atomic_inc(&adap->chcr_stats.fallback);
1338 		err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
1339 					   subtype ==
1340 					   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
1341 					   reqctx->iv : req->iv,
1342 					   op_type);
1343 		goto error;
1344 	}
1345 	reqctx->op = op_type;
1346 	reqctx->srcsg = req->src;
1347 	reqctx->dstsg = req->dst;
1348 	reqctx->src_ofst = 0;
1349 	reqctx->dst_ofst = 0;
1350 	wrparam.qid = qid;
1351 	wrparam.req = req;
1352 	wrparam.bytes = bytes;
1353 	*skb = create_cipher_wr(&wrparam);
1354 	if (IS_ERR(*skb)) {
1355 		err = PTR_ERR(*skb);
1356 		goto unmap;
1357 	}
1358 	reqctx->processed = bytes;
1359 	reqctx->last_req_len = bytes;
1360 	reqctx->partial_req = !!(req->cryptlen - reqctx->processed);
1361 
1362 	return 0;
1363 unmap:
1364 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1365 error:
1366 	return err;
1367 }
1368 
1369 static int chcr_aes_encrypt(struct skcipher_request *req)
1370 {
1371 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1372 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1373 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1374 	struct sk_buff *skb = NULL;
1375 	int err;
1376 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1377 	struct chcr_context *ctx = c_ctx(tfm);
1378 	unsigned int cpu;
1379 
1380 	cpu = get_cpu();
1381 	reqctx->txqidx = cpu % ctx->ntxq;
1382 	reqctx->rxqidx = cpu % ctx->nrxq;
1383 	put_cpu();
1384 
1385 	err = chcr_inc_wrcount(dev);
1386 	if (err)
1387 		return -ENXIO;
1388 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1389 						reqctx->txqidx) &&
1390 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1391 			err = -ENOSPC;
1392 			goto error;
1393 	}
1394 
1395 	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1396 			     &skb, CHCR_ENCRYPT_OP);
1397 	if (err || !skb)
1398 		return  err;
1399 	skb->dev = u_ctx->lldi.ports[0];
1400 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1401 	chcr_send_wr(skb);
1402 	if (get_cryptoalg_subtype(tfm) ==
1403 		CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags ==
1404 			CRYPTO_TFM_REQ_MAY_SLEEP) {
1405 			reqctx->partial_req = 1;
1406 			wait_for_completion(&ctx->cbc_aes_aio_done);
1407 	}
1408 	return -EINPROGRESS;
1409 error:
1410 	chcr_dec_wrcount(dev);
1411 	return err;
1412 }
1413 
1414 static int chcr_aes_decrypt(struct skcipher_request *req)
1415 {
1416 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1417 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
1418 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1419 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1420 	struct sk_buff *skb = NULL;
1421 	int err;
1422 	struct chcr_context *ctx = c_ctx(tfm);
1423 	unsigned int cpu;
1424 
1425 	cpu = get_cpu();
1426 	reqctx->txqidx = cpu % ctx->ntxq;
1427 	reqctx->rxqidx = cpu % ctx->nrxq;
1428 	put_cpu();
1429 
1430 	err = chcr_inc_wrcount(dev);
1431 	if (err)
1432 		return -ENXIO;
1433 
1434 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1435 						reqctx->txqidx) &&
1436 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))))
1437 			return -ENOSPC;
1438 	err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx],
1439 			     &skb, CHCR_DECRYPT_OP);
1440 	if (err || !skb)
1441 		return err;
1442 	skb->dev = u_ctx->lldi.ports[0];
1443 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
1444 	chcr_send_wr(skb);
1445 	return -EINPROGRESS;
1446 }
1447 static int chcr_device_init(struct chcr_context *ctx)
1448 {
1449 	struct uld_ctx *u_ctx = NULL;
1450 	int txq_perchan, ntxq;
1451 	int err = 0, rxq_perchan;
1452 
1453 	if (!ctx->dev) {
1454 		u_ctx = assign_chcr_device();
1455 		if (!u_ctx) {
1456 			err = -ENXIO;
1457 			pr_err("chcr device assignment fails\n");
1458 			goto out;
1459 		}
1460 		ctx->dev = &u_ctx->dev;
1461 		ntxq = u_ctx->lldi.ntxq;
1462 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1463 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1464 		ctx->ntxq = ntxq;
1465 		ctx->nrxq = u_ctx->lldi.nrxq;
1466 		ctx->rxq_perchan = rxq_perchan;
1467 		ctx->txq_perchan = txq_perchan;
1468 	}
1469 out:
1470 	return err;
1471 }
1472 
1473 static int chcr_init_tfm(struct crypto_skcipher *tfm)
1474 {
1475 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1476 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1477 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1478 
1479 	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
1480 				CRYPTO_ALG_NEED_FALLBACK);
1481 	if (IS_ERR(ablkctx->sw_cipher)) {
1482 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1483 		return PTR_ERR(ablkctx->sw_cipher);
1484 	}
1485 	init_completion(&ctx->cbc_aes_aio_done);
1486 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1487 					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
1488 
1489 	return chcr_device_init(ctx);
1490 }
1491 
1492 static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
1493 {
1494 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1495 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1496 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1497 
1498 	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1499 	 * cannot be used as the fallback in chcr_handle_cipher_resp()
1500 	 */
1501 	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1502 				CRYPTO_ALG_NEED_FALLBACK);
1503 	if (IS_ERR(ablkctx->sw_cipher)) {
1504 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
1505 		return PTR_ERR(ablkctx->sw_cipher);
1506 	}
1507 	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
1508 				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
1509 	return chcr_device_init(ctx);
1510 }
1511 
1512 
1513 static void chcr_exit_tfm(struct crypto_skcipher *tfm)
1514 {
1515 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
1516 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1517 
1518 	crypto_free_skcipher(ablkctx->sw_cipher);
1519 }
1520 
1521 static int get_alg_config(struct algo_param *params,
1522 			  unsigned int auth_size)
1523 {
1524 	switch (auth_size) {
1525 	case SHA1_DIGEST_SIZE:
1526 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1527 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1528 		params->result_size = SHA1_DIGEST_SIZE;
1529 		break;
1530 	case SHA224_DIGEST_SIZE:
1531 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1532 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1533 		params->result_size = SHA256_DIGEST_SIZE;
1534 		break;
1535 	case SHA256_DIGEST_SIZE:
1536 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1537 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1538 		params->result_size = SHA256_DIGEST_SIZE;
1539 		break;
1540 	case SHA384_DIGEST_SIZE:
1541 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1542 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1543 		params->result_size = SHA512_DIGEST_SIZE;
1544 		break;
1545 	case SHA512_DIGEST_SIZE:
1546 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1547 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1548 		params->result_size = SHA512_DIGEST_SIZE;
1549 		break;
1550 	default:
1551 		pr_err("ERROR, unsupported digest size\n");
1552 		return -EINVAL;
1553 	}
1554 	return 0;
1555 }
1556 
1557 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1558 {
1559 	crypto_free_shash(base_hash);
1560 }
1561 
1562 /**
1563  *	create_hash_wr - Create hash work request
1564  *	@req: ahash request
1565  */
1566 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1567 				      struct hash_wr_param *param)
1568 {
1569 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1570 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1571 	struct chcr_context *ctx = h_ctx(tfm);
1572 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1573 	struct sk_buff *skb = NULL;
1574 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
1575 	struct chcr_wr *chcr_req;
1576 	struct ulptx_sgl *ulptx;
1577 	unsigned int nents = 0, transhdr_len;
1578 	unsigned int temp = 0;
1579 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1580 		GFP_ATOMIC;
1581 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1582 	int error = 0;
1583 	unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
1584 
1585 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
1586 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1587 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1588 				param->sg_len) <= SGE_MAX_WR_LEN;
1589 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1590 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1591 	nents += param->bfr_len ? 1 : 0;
1592 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1593 				param->sg_len, 16) : (sgl_len(nents) * 8);
1594 	transhdr_len = roundup(transhdr_len, 16);
1595 
1596 	skb = alloc_skb(transhdr_len, flags);
1597 	if (!skb)
1598 		return ERR_PTR(-ENOMEM);
1599 	chcr_req = __skb_put_zero(skb, transhdr_len);
1600 
1601 	chcr_req->sec_cpl.op_ivinsrtofst =
1602 		FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0);
1603 
1604 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1605 
1606 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1607 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1608 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1609 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1610 	chcr_req->sec_cpl.seqno_numivs =
1611 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1612 					 param->opad_needed, 0);
1613 
1614 	chcr_req->sec_cpl.ivgen_hdrlen =
1615 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1616 
1617 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1618 	       param->alg_prm.result_size);
1619 
1620 	if (param->opad_needed)
1621 		memcpy(chcr_req->key_ctx.key +
1622 		       ((param->alg_prm.result_size <= 32) ? 32 :
1623 			CHCR_HASH_MAX_DIGEST_SIZE),
1624 		       hmacctx->opad, param->alg_prm.result_size);
1625 
1626 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1627 					    param->alg_prm.mk_size, 0,
1628 					    param->opad_needed,
1629 					    ((param->kctx_len +
1630 					     sizeof(chcr_req->key_ctx)) >> 4));
1631 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1632 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1633 				     DUMMY_BYTES);
1634 	if (param->bfr_len != 0) {
1635 		req_ctx->hctx_wr.dma_addr =
1636 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1637 				       param->bfr_len, DMA_TO_DEVICE);
1638 		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1639 				       req_ctx->hctx_wr.dma_addr)) {
1640 			error = -ENOMEM;
1641 			goto err;
1642 		}
1643 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1644 	} else {
1645 		req_ctx->hctx_wr.dma_addr = 0;
1646 	}
1647 	chcr_add_hash_src_ent(req, ulptx, param);
1648 	/* Request up to the max WR size */
1649 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1650 				(param->sg_len + param->bfr_len) : 0);
1651 	atomic_inc(&adap->chcr_stats.digest_rqst);
1652 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1653 		    param->hash_size, transhdr_len,
1654 		    temp,  0);
1655 	req_ctx->hctx_wr.skb = skb;
1656 	return skb;
1657 err:
1658 	kfree_skb(skb);
1659 	return  ERR_PTR(error);
1660 }
1661 
1662 static int chcr_ahash_update(struct ahash_request *req)
1663 {
1664 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1665 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1666 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1667 	struct chcr_context *ctx = h_ctx(rtfm);
1668 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1669 	struct sk_buff *skb;
1670 	u8 remainder = 0, bs;
1671 	unsigned int nbytes = req->nbytes;
1672 	struct hash_wr_param params;
1673 	int error;
1674 	unsigned int cpu;
1675 
1676 	cpu = get_cpu();
1677 	req_ctx->txqidx = cpu % ctx->ntxq;
1678 	req_ctx->rxqidx = cpu % ctx->nrxq;
1679 	put_cpu();
1680 
1681 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1682 
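	/* The hardware is fed whole blocks only: anything short of a block
	 * boundary is buffered in reqbfr and carried into the next request.
	 */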
1683 	if (nbytes + req_ctx->reqlen >= bs) {
1684 		remainder = (nbytes + req_ctx->reqlen) % bs;
1685 		nbytes = nbytes + req_ctx->reqlen - remainder;
1686 	} else {
1687 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1688 				   + req_ctx->reqlen, nbytes, 0);
1689 		req_ctx->reqlen += nbytes;
1690 		return 0;
1691 	}
1692 	error = chcr_inc_wrcount(dev);
1693 	if (error)
1694 		return -ENXIO;
1695 	/* Detach state for CHCR means lldi or padap is freed. Increasing the
1696 	 * inflight count for dev guarantees that lldi and padap stay valid.
1697 	 */
1698 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1699 						req_ctx->txqidx) &&
1700 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1701 			error = -ENOSPC;
1702 			goto err;
1703 	}
1704 
1705 	chcr_init_hctx_per_wr(req_ctx);
1706 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1707 	if (error) {
1708 		error = -ENOMEM;
1709 		goto err;
1710 	}
1711 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1712 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1713 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1714 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1715 	if (params.sg_len > req->nbytes)
1716 		params.sg_len = req->nbytes;
1717 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1718 			req_ctx->reqlen;
1719 	params.opad_needed = 0;
1720 	params.more = 1;
1721 	params.last = 0;
1722 	params.bfr_len = req_ctx->reqlen;
1723 	params.scmd1 = 0;
1724 	req_ctx->hctx_wr.srcsg = req->src;
1725 
1726 	params.hash_size = params.alg_prm.result_size;
1727 	req_ctx->data_len += params.sg_len + params.bfr_len;
1728 	skb = create_hash_wr(req, &params);
1729 	if (IS_ERR(skb)) {
1730 		error = PTR_ERR(skb);
1731 		goto unmap;
1732 	}
1733 
1734 	req_ctx->hctx_wr.processed += params.sg_len;
1735 	if (remainder) {
1736 		/* Swap buffers */
1737 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1738 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1739 				   req_ctx->reqbfr, remainder, req->nbytes -
1740 				   remainder);
1741 	}
1742 	req_ctx->reqlen = remainder;
1743 	skb->dev = u_ctx->lldi.ports[0];
1744 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1745 	chcr_send_wr(skb);
1746 	return -EINPROGRESS;
1747 unmap:
1748 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1749 err:
1750 	chcr_dec_wrcount(dev);
1751 	return error;
1752 }
1753 
1754 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1755 {
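	/* Build the standard MD-style final block: a 0x80 pad byte followed by
	 * the message length in bits, stored big-endian at the end of the
	 * block (offset 56 for 64-byte blocks, 120 for 128-byte blocks).
	 */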
1756 	memset(bfr_ptr, 0, bs);
1757 	*bfr_ptr = 0x80;
1758 	if (bs == 64)
1759 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1  << 3);
1760 	else
1761 		*(__be64 *)(bfr_ptr + 120) =  cpu_to_be64(scmd1  << 3);
1762 }
1763 
1764 static int chcr_ahash_final(struct ahash_request *req)
1765 {
1766 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1767 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1768 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1769 	struct hash_wr_param params;
1770 	struct sk_buff *skb;
1771 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1772 	struct chcr_context *ctx = h_ctx(rtfm);
1773 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1774 	int error;
1775 	unsigned int cpu;
1776 
1777 	cpu = get_cpu();
1778 	req_ctx->txqidx = cpu % ctx->ntxq;
1779 	req_ctx->rxqidx = cpu % ctx->nrxq;
1780 	put_cpu();
1781 
1782 	error = chcr_inc_wrcount(dev);
1783 	if (error)
1784 		return -ENXIO;
1785 
1786 	chcr_init_hctx_per_wr(req_ctx);
1787 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1788 		params.opad_needed = 1;
1789 	else
1790 		params.opad_needed = 0;
1791 	params.sg_len = 0;
1792 	req_ctx->hctx_wr.isfinal = 1;
1793 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1794 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1795 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1796 		params.opad_needed = 1;
1797 		params.kctx_len *= 2;
1798 	} else {
1799 		params.opad_needed = 0;
1800 	}
1801 
1802 	req_ctx->hctx_wr.result = 1;
1803 	params.bfr_len = req_ctx->reqlen;
1804 	req_ctx->data_len += params.bfr_len + params.sg_len;
1805 	req_ctx->hctx_wr.srcsg = req->src;
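	/* With no buffered residue, build the final padding block in software
	 * and send it as one more intermediate ("more") request; otherwise the
	 * hardware finalises the digest using scmd1 as the total length.
	 */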
1806 	if (req_ctx->reqlen == 0) {
1807 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1808 		params.last = 0;
1809 		params.more = 1;
1810 		params.scmd1 = 0;
1811 		params.bfr_len = bs;
1812 
1813 	} else {
1814 		params.scmd1 = req_ctx->data_len;
1815 		params.last = 1;
1816 		params.more = 0;
1817 	}
1818 	params.hash_size = crypto_ahash_digestsize(rtfm);
1819 	skb = create_hash_wr(req, &params);
1820 	if (IS_ERR(skb)) {
1821 		error = PTR_ERR(skb);
1822 		goto err;
1823 	}
1824 	req_ctx->reqlen = 0;
1825 	skb->dev = u_ctx->lldi.ports[0];
1826 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1827 	chcr_send_wr(skb);
1828 	return -EINPROGRESS;
1829 err:
1830 	chcr_dec_wrcount(dev);
1831 	return error;
1832 }
1833 
1834 static int chcr_ahash_finup(struct ahash_request *req)
1835 {
1836 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1837 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1838 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1839 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1840 	struct chcr_context *ctx = h_ctx(rtfm);
1841 	struct sk_buff *skb;
1842 	struct hash_wr_param params;
1843 	u8  bs;
1844 	int error;
1845 	unsigned int cpu;
1846 
1847 	cpu = get_cpu();
1848 	req_ctx->txqidx = cpu % ctx->ntxq;
1849 	req_ctx->rxqidx = cpu % ctx->nrxq;
1850 	put_cpu();
1851 
1852 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1853 	error = chcr_inc_wrcount(dev);
1854 	if (error)
1855 		return -ENXIO;
1856 
1857 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1858 						req_ctx->txqidx) &&
1859 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1860 			error = -ENOSPC;
1861 			goto err;
1862 	}
1863 	chcr_init_hctx_per_wr(req_ctx);
1864 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1865 	if (error) {
1866 		error = -ENOMEM;
1867 		goto err;
1868 	}
1869 
1870 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1871 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1872 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1873 		params.kctx_len *= 2;
1874 		params.opad_needed = 1;
1875 	} else {
1876 		params.opad_needed = 0;
1877 	}
1878 
1879 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1880 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1881 	if (params.sg_len < req->nbytes) {
1882 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1883 			params.kctx_len /= 2;
1884 			params.opad_needed = 0;
1885 		}
1886 		params.last = 0;
1887 		params.more = 1;
1888 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1889 					- req_ctx->reqlen;
1890 		params.hash_size = params.alg_prm.result_size;
1891 		params.scmd1 = 0;
1892 	} else {
1893 		params.last = 1;
1894 		params.more = 0;
1895 		params.sg_len = req->nbytes;
1896 		params.hash_size = crypto_ahash_digestsize(rtfm);
1897 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1898 				params.sg_len;
1899 	}
1900 	params.bfr_len = req_ctx->reqlen;
1901 	req_ctx->data_len += params.bfr_len + params.sg_len;
1902 	req_ctx->hctx_wr.result = 1;
1903 	req_ctx->hctx_wr.srcsg = req->src;
1904 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1905 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1906 		params.last = 0;
1907 		params.more = 1;
1908 		params.scmd1 = 0;
1909 		params.bfr_len = bs;
1910 	}
1911 	skb = create_hash_wr(req, &params);
1912 	if (IS_ERR(skb)) {
1913 		error = PTR_ERR(skb);
1914 		goto unmap;
1915 	}
1916 	req_ctx->reqlen = 0;
1917 	req_ctx->hctx_wr.processed += params.sg_len;
1918 	skb->dev = u_ctx->lldi.ports[0];
1919 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
1920 	chcr_send_wr(skb);
1921 	return -EINPROGRESS;
1922 unmap:
1923 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1924 err:
1925 	chcr_dec_wrcount(dev);
1926 	return error;
1927 }
1928 
1929 static int chcr_ahash_digest(struct ahash_request *req)
1930 {
1931 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1932 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1933 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1934 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm));
1935 	struct chcr_context *ctx = h_ctx(rtfm);
1936 	struct sk_buff *skb;
1937 	struct hash_wr_param params;
1938 	u8  bs;
1939 	int error;
1940 	unsigned int cpu;
1941 
1942 	cpu = get_cpu();
1943 	req_ctx->txqidx = cpu % ctx->ntxq;
1944 	req_ctx->rxqidx = cpu % ctx->nrxq;
1945 	put_cpu();
1946 
1947 	rtfm->init(req);
1948 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1949 	error = chcr_inc_wrcount(dev);
1950 	if (error)
1951 		return -ENXIO;
1952 
1953 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1954 						req_ctx->txqidx) &&
1955 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
1956 			error = -ENOSPC;
1957 			goto err;
1958 	}
1959 
1960 	chcr_init_hctx_per_wr(req_ctx);
1961 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1962 	if (error) {
1963 		error = -ENOMEM;
1964 		goto err;
1965 	}
1966 
1967 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1968 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1969 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1970 		params.kctx_len *= 2;
1971 		params.opad_needed = 1;
1972 	} else {
1973 		params.opad_needed = 0;
1974 	}
1975 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1976 				HASH_SPACE_LEFT(params.kctx_len), 0);
1977 	if (params.sg_len < req->nbytes) {
1978 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1979 			params.kctx_len /= 2;
1980 			params.opad_needed = 0;
1981 		}
1982 		params.last = 0;
1983 		params.more = 1;
1984 		params.scmd1 = 0;
1985 		params.sg_len = rounddown(params.sg_len, bs);
1986 		params.hash_size = params.alg_prm.result_size;
1987 	} else {
1988 		params.sg_len = req->nbytes;
1989 		params.hash_size = crypto_ahash_digestsize(rtfm);
1990 		params.last = 1;
1991 		params.more = 0;
1992 		params.scmd1 = req->nbytes + req_ctx->data_len;
1993 
1994 	}
1995 	params.bfr_len = 0;
1996 	req_ctx->hctx_wr.result = 1;
1997 	req_ctx->hctx_wr.srcsg = req->src;
1998 	req_ctx->data_len += params.bfr_len + params.sg_len;
1999 
2000 	if (req->nbytes == 0) {
2001 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
2002 		params.more = 1;
2003 		params.bfr_len = bs;
2004 	}
2005 
2006 	skb = create_hash_wr(req, &params);
2007 	if (IS_ERR(skb)) {
2008 		error = PTR_ERR(skb);
2009 		goto unmap;
2010 	}
2011 	req_ctx->hctx_wr.processed += params.sg_len;
2012 	skb->dev = u_ctx->lldi.ports[0];
2013 	set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx);
2014 	chcr_send_wr(skb);
2015 	return -EINPROGRESS;
2016 unmap:
2017 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2018 err:
2019 	chcr_dec_wrcount(dev);
2020 	return error;
2021 }
2022 
2023 static int chcr_ahash_continue(struct ahash_request *req)
2024 {
2025 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2026 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2027 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
2028 	struct chcr_context *ctx = h_ctx(rtfm);
2029 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2030 	struct sk_buff *skb;
2031 	struct hash_wr_param params;
2032 	u8  bs;
2033 	int error;
2034 	unsigned int cpu;
2035 
2036 	cpu = get_cpu();
2037 	reqctx->txqidx = cpu % ctx->ntxq;
2038 	reqctx->rxqidx = cpu % ctx->nrxq;
2039 	put_cpu();
2040 
2041 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2042 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
2043 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
2044 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2045 		params.kctx_len *= 2;
2046 		params.opad_needed = 1;
2047 	} else {
2048 		params.opad_needed = 0;
2049 	}
2050 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
2051 					    HASH_SPACE_LEFT(params.kctx_len),
2052 					    hctx_wr->src_ofst);
2053 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
2054 		params.sg_len = req->nbytes - hctx_wr->processed;
2055 	if (!hctx_wr->result ||
2056 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
2057 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
2058 			params.kctx_len /= 2;
2059 			params.opad_needed = 0;
2060 		}
2061 		params.last = 0;
2062 		params.more = 1;
2063 		params.sg_len = rounddown(params.sg_len, bs);
2064 		params.hash_size = params.alg_prm.result_size;
2065 		params.scmd1 = 0;
2066 	} else {
2067 		params.last = 1;
2068 		params.more = 0;
2069 		params.hash_size = crypto_ahash_digestsize(rtfm);
2070 		params.scmd1 = reqctx->data_len + params.sg_len;
2071 	}
2072 	params.bfr_len = 0;
2073 	reqctx->data_len += params.sg_len;
2074 	skb = create_hash_wr(req, &params);
2075 	if (IS_ERR(skb)) {
2076 		error = PTR_ERR(skb);
2077 		goto err;
2078 	}
2079 	hctx_wr->processed += params.sg_len;
2080 	skb->dev = u_ctx->lldi.ports[0];
2081 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
2082 	chcr_send_wr(skb);
2083 	return 0;
2084 err:
2085 	return error;
2086 }
2087 
2088 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
2089 					  unsigned char *input,
2090 					  int err)
2091 {
2092 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2093 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
2094 	int digestsize, updated_digestsize;
2095 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
2096 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2097 	struct chcr_dev *dev = h_ctx(tfm)->dev;
2098 
2099 	if (input == NULL)
2100 		goto out;
2101 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2102 	updated_digestsize = digestsize;
2103 	if (digestsize == SHA224_DIGEST_SIZE)
2104 		updated_digestsize = SHA256_DIGEST_SIZE;
2105 	else if (digestsize == SHA384_DIGEST_SIZE)
2106 		updated_digestsize = SHA512_DIGEST_SIZE;
2107 
2108 	if (hctx_wr->dma_addr) {
2109 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2110 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2111 		hctx_wr->dma_addr = 0;
2112 	}
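	/* On the final WR (or once all data is consumed) the response carries
	 * the digest destined for req->result; intermediate responses only
	 * refresh the partial hash that seeds the next work request.
	 */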
2113 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2114 				 req->nbytes)) {
2115 		if (hctx_wr->result == 1) {
2116 			hctx_wr->result = 0;
2117 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2118 			       digestsize);
2119 		} else {
2120 			memcpy(reqctx->partial_hash,
2121 			       input + sizeof(struct cpl_fw6_pld),
2122 			       updated_digestsize);
2123 
2124 		}
2125 		goto unmap;
2126 	}
2127 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2128 	       updated_digestsize);
2129 
2130 	err = chcr_ahash_continue(req);
2131 	if (err)
2132 		goto unmap;
2133 	return;
2134 unmap:
2135 	if (hctx_wr->is_sg_map)
2136 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2137 
2138 
2139 out:
2140 	chcr_dec_wrcount(dev);
2141 	req->base.complete(&req->base, err);
2142 }
2143 
2144 /*
2145  *	chcr_handle_resp - Unmap the DMA buffers and complete the request
2146  *	@req: crypto request
2147  */
2148 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2149 			 int err)
2150 {
2151 	struct crypto_tfm *tfm = req->tfm;
2152 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2153 	struct adapter *adap = padap(ctx->dev);
2154 
2155 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2156 	case CRYPTO_ALG_TYPE_AEAD:
2157 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2158 		break;
2159 
2160 	case CRYPTO_ALG_TYPE_SKCIPHER:
2161 		chcr_handle_cipher_resp(skcipher_request_cast(req),
2162 					input, err);
2163 		break;
2164 	case CRYPTO_ALG_TYPE_AHASH:
2165 		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2166 	}
2167 	atomic_inc(&adap->chcr_stats.complete);
2168 	return err;
2169 }
2170 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2171 {
2172 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2173 	struct chcr_ahash_req_ctx *state = out;
2174 
2175 	state->reqlen = req_ctx->reqlen;
2176 	state->data_len = req_ctx->data_len;
2177 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2178 	memcpy(state->partial_hash, req_ctx->partial_hash,
2179 	       CHCR_HASH_MAX_DIGEST_SIZE);
2180 	chcr_init_hctx_per_wr(state);
2181 	return 0;
2182 }
2183 
2184 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2185 {
2186 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2187 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2188 
2189 	req_ctx->reqlen = state->reqlen;
2190 	req_ctx->data_len = state->data_len;
2191 	req_ctx->reqbfr = req_ctx->bfr1;
2192 	req_ctx->skbfr = req_ctx->bfr2;
2193 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2194 	memcpy(req_ctx->partial_hash, state->partial_hash,
2195 	       CHCR_HASH_MAX_DIGEST_SIZE);
2196 	chcr_init_hctx_per_wr(req_ctx);
2197 	return 0;
2198 }
2199 
2200 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2201 			     unsigned int keylen)
2202 {
2203 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2204 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2205 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2206 	unsigned int i, err = 0, updated_digestsize;
2207 
2208 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2209 
2210 	/* Use the key to calculate the ipad and opad. The ipad will be sent
2211 	 * with the first request's data and the opad with the final hash
2212 	 * result. They are kept in hmacctx->ipad and hmacctx->opad.
2213 	 */
2214 	shash->tfm = hmacctx->base_hash;
2215 	if (keylen > bs) {
2216 		err = crypto_shash_digest(shash, key, keylen,
2217 					  hmacctx->ipad);
2218 		if (err)
2219 			goto out;
2220 		keylen = digestsize;
2221 	} else {
2222 		memcpy(hmacctx->ipad, key, keylen);
2223 	}
2224 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2225 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2226 
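	/* XOR the zero-padded key with the standard HMAC ipad/opad constants
	 * (0x36 / 0x5c repeated), one word at a time.
	 */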
2227 	for (i = 0; i < bs / sizeof(int); i++) {
2228 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2229 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2230 	}
2231 
2232 	updated_digestsize = digestsize;
2233 	if (digestsize == SHA224_DIGEST_SIZE)
2234 		updated_digestsize = SHA256_DIGEST_SIZE;
2235 	else if (digestsize == SHA384_DIGEST_SIZE)
2236 		updated_digestsize = SHA512_DIGEST_SIZE;
2237 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2238 					hmacctx->ipad, digestsize);
2239 	if (err)
2240 		goto out;
2241 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2242 
2243 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2244 					hmacctx->opad, digestsize);
2245 	if (err)
2246 		goto out;
2247 	chcr_change_order(hmacctx->opad, updated_digestsize);
2248 out:
2249 	return err;
2250 }
2251 
2252 static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
2253 			       unsigned int key_len)
2254 {
2255 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2256 	unsigned short context_size = 0;
2257 	int err;
2258 
2259 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2260 	if (err)
2261 		goto badkey_err;
2262 
2263 	memcpy(ablkctx->key, key, key_len);
2264 	ablkctx->enckey_len = key_len;
2265 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2266 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2267 	/* Both keys for XTS must be aligned to a 16 byte boundary by padding
2268 	 * with zeros, so each 24 byte key is padded with 8 zero bytes.
2269 	 */
2270 	if (key_len == 48) {
2271 		context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len
2272 				+ 16) >> 4;
2273 		memmove(ablkctx->key + 32, ablkctx->key + 24, 24);
2274 		memset(ablkctx->key + 24, 0, 8);
2275 		memset(ablkctx->key + 56, 0, 8);
2276 		ablkctx->enckey_len = 64;
2277 		ablkctx->key_ctx_hdr =
2278 			FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192,
2279 					 CHCR_KEYCTX_NO_KEY, 1,
2280 					 0, context_size);
2281 	} else {
2282 		ablkctx->key_ctx_hdr =
2283 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2284 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2285 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2286 				 CHCR_KEYCTX_NO_KEY, 1,
2287 				 0, context_size);
2288 	}
2289 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2290 	return 0;
2291 badkey_err:
2292 	ablkctx->enckey_len = 0;
2293 
2294 	return err;
2295 }
2296 
2297 static int chcr_sha_init(struct ahash_request *areq)
2298 {
2299 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2300 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2301 	int digestsize =  crypto_ahash_digestsize(tfm);
2302 
2303 	req_ctx->data_len = 0;
2304 	req_ctx->reqlen = 0;
2305 	req_ctx->reqbfr = req_ctx->bfr1;
2306 	req_ctx->skbfr = req_ctx->bfr2;
2307 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2308 
2309 	return 0;
2310 }
2311 
2312 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2313 {
2314 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2315 				 sizeof(struct chcr_ahash_req_ctx));
2316 	return chcr_device_init(crypto_tfm_ctx(tfm));
2317 }
2318 
2319 static int chcr_hmac_init(struct ahash_request *areq)
2320 {
2321 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2322 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2323 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2324 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2325 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2326 
2327 	chcr_sha_init(areq);
2328 	req_ctx->data_len = bs;
2329 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2330 		if (digestsize == SHA224_DIGEST_SIZE)
2331 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2332 			       SHA256_DIGEST_SIZE);
2333 		else if (digestsize == SHA384_DIGEST_SIZE)
2334 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2335 			       SHA512_DIGEST_SIZE);
2336 		else
2337 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2338 			       digestsize);
2339 	}
2340 	return 0;
2341 }
2342 
2343 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2344 {
2345 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2346 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2347 	unsigned int digestsize =
2348 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2349 
2350 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2351 				 sizeof(struct chcr_ahash_req_ctx));
2352 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2353 	if (IS_ERR(hmacctx->base_hash))
2354 		return PTR_ERR(hmacctx->base_hash);
2355 	return chcr_device_init(crypto_tfm_ctx(tfm));
2356 }
2357 
2358 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2359 {
2360 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2361 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2362 
2363 	if (hmacctx->base_hash) {
2364 		chcr_free_shash(hmacctx->base_hash);
2365 		hmacctx->base_hash = NULL;
2366 	}
2367 }
2368 
2369 inline void chcr_aead_common_exit(struct aead_request *req)
2370 {
2371 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2372 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2373 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2374 
2375 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2376 }
2377 
2378 static int chcr_aead_common_init(struct aead_request *req)
2379 {
2380 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2381 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2382 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2383 	unsigned int authsize = crypto_aead_authsize(tfm);
2384 	int error = -EINVAL;
2385 
2386 	/* validate key size */
2387 	if (aeadctx->enckey_len == 0)
2388 		goto err;
2389 	if (reqctx->op && req->cryptlen < authsize)
2390 		goto err;
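	/* For CCM the B0 block and AAD length field are built in the scratch
	 * pad placed directly after the IV in the request context.
	 */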
2391 	if (reqctx->b0_len)
2392 		reqctx->scratch_pad = reqctx->iv + IV;
2393 	else
2394 		reqctx->scratch_pad = NULL;
2395 
2396 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2397 				  reqctx->op);
2398 	if (error) {
2399 		error = -ENOMEM;
2400 		goto err;
2401 	}
2402 
2403 	return 0;
2404 err:
2405 	return error;
2406 }
2407 
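/* Fall back to the software implementation when the request cannot be
 * expressed as a single hardware WR: zero-length payload, too many
 * destination SG entries, oversized AAD, or a WR above the SGE limit.
 */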
2408 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2409 				   int aadmax, int wrlen,
2410 				   unsigned short op_type)
2411 {
2412 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2413 
2414 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2415 	    dst_nents > MAX_DSGL_ENT ||
2416 	    (req->assoclen > aadmax) ||
2417 	    (wrlen > SGE_MAX_WR_LEN))
2418 		return 1;
2419 	return 0;
2420 }
2421 
2422 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2423 {
2424 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2425 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2426 	struct aead_request *subreq = aead_request_ctx(req);
2427 
2428 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2429 	aead_request_set_callback(subreq, req->base.flags,
2430 				  req->base.complete, req->base.data);
2431 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2432 				 req->iv);
2433 	aead_request_set_ad(subreq, req->assoclen);
2434 	return op_type ? crypto_aead_decrypt(subreq) :
2435 		crypto_aead_encrypt(subreq);
2436 }
2437 
2438 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2439 					 unsigned short qid,
2440 					 int size)
2441 {
2442 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2443 	struct chcr_context *ctx = a_ctx(tfm);
2444 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2445 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2446 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2447 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2448 	struct sk_buff *skb = NULL;
2449 	struct chcr_wr *chcr_req;
2450 	struct cpl_rx_phys_dsgl *phys_cpl;
2451 	struct ulptx_sgl *ulptx;
2452 	unsigned int transhdr_len;
2453 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2454 	unsigned int   kctx_len = 0, dnents, snents;
2455 	unsigned int  authsize = crypto_aead_authsize(tfm);
2456 	int error = -EINVAL;
2457 	u8 *ivptr;
2458 	int null = 0;
2459 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2460 		GFP_ATOMIC;
2461 	struct adapter *adap = padap(ctx->dev);
2462 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2463 
2464 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2465 	if (req->cryptlen == 0)
2466 		return NULL;
2467 
2468 	reqctx->b0_len = 0;
2469 	error = chcr_aead_common_init(req);
2470 	if (error)
2471 		return ERR_PTR(error);
2472 
2473 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2474 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2475 		null = 1;
2476 	}
2477 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2478 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2479 	dnents += MIN_AUTH_SG; // For IV
2480 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2481 			       CHCR_SRC_SG_SIZE, 0);
2482 	dst_size = get_space_for_phys_dsgl(dnents);
2483 	kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4)
2484 		- sizeof(chcr_req->key_ctx);
2485 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2486 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2487 			SGE_MAX_WR_LEN;
2488 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2489 			: (sgl_len(snents) * 8);
2490 	transhdr_len += temp;
2491 	transhdr_len = roundup(transhdr_len, 16);
2492 
2493 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2494 				    transhdr_len, reqctx->op)) {
2495 		atomic_inc(&adap->chcr_stats.fallback);
2496 		chcr_aead_common_exit(req);
2497 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2498 	}
2499 	skb = alloc_skb(transhdr_len, flags);
2500 	if (!skb) {
2501 		error = -ENOMEM;
2502 		goto err;
2503 	}
2504 
2505 	chcr_req = __skb_put_zero(skb, transhdr_len);
2506 
2507 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2508 
2509 	/*
2510 	 * Input order is AAD, IV and payload, where the IV is included as
2511 	 * part of the authdata. All other fields are filled according to
2512 	 * the hardware spec.
2513 	 */
2514 	chcr_req->sec_cpl.op_ivinsrtofst =
2515 				FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2516 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2517 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2518 					null ? 0 : 1 + IV,
2519 					null ? 0 : IV + req->assoclen,
2520 					req->assoclen + IV + 1,
2521 					(temp & 0x1F0) >> 4);
2522 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2523 					temp & 0xF,
2524 					null ? 0 : req->assoclen + IV + 1,
2525 					temp, temp);
2526 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2527 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2528 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2529 	else
2530 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2531 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2532 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2533 					temp,
2534 					actx->auth_mode, aeadctx->hmac_ctrl,
2535 					IV >> 1);
2536 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2537 					 0, 0, dst_size);
2538 
2539 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2540 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2541 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2542 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2543 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2544 		       aeadctx->enckey_len);
2545 	else
2546 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2547 		       aeadctx->enckey_len);
2548 
2549 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2550 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2551 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2552 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2553 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2554 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2555 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2556 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2557 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2558 				CTR_RFC3686_IV_SIZE);
2559 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2560 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2561 	} else {
2562 		memcpy(ivptr, req->iv, IV);
2563 	}
2564 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2565 	chcr_add_aead_src_ent(req, ulptx);
2566 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2567 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2568 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2569 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2570 		   transhdr_len, temp, 0);
2571 	reqctx->skb = skb;
2572 
2573 	return skb;
2574 err:
2575 	chcr_aead_common_exit(req);
2576 
2577 	return ERR_PTR(error);
2578 }
2579 
2580 int chcr_aead_dma_map(struct device *dev,
2581 		      struct aead_request *req,
2582 		      unsigned short op_type)
2583 {
2584 	int error;
2585 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2586 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2587 	unsigned int authsize = crypto_aead_authsize(tfm);
2588 	int src_len, dst_len;
2589 
2590 	/* Calculate and handle the src and dst sg lengths separately for
2591 	 * in-place and out-of-place operations.
2592 	 */
2593 	if (req->src == req->dst) {
2594 		src_len = req->assoclen + req->cryptlen + (op_type ?
2595 							0 : authsize);
2596 		dst_len = src_len;
2597 	} else {
2598 		src_len = req->assoclen + req->cryptlen;
2599 		dst_len = req->assoclen + req->cryptlen + (op_type ?
2600 							-authsize : authsize);
2601 	}
2602 
2603 	if (!req->cryptlen || !src_len || !dst_len)
2604 		return 0;
2605 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2606 					DMA_BIDIRECTIONAL);
2607 	if (dma_mapping_error(dev, reqctx->iv_dma))
2608 		return -ENOMEM;
2609 	if (reqctx->b0_len)
2610 		reqctx->b0_dma = reqctx->iv_dma + IV;
2611 	else
2612 		reqctx->b0_dma = 0;
2613 	if (req->src == req->dst) {
2614 		error = dma_map_sg(dev, req->src,
2615 				sg_nents_for_len(req->src, src_len),
2616 					DMA_BIDIRECTIONAL);
2617 		if (!error)
2618 			goto err;
2619 	} else {
2620 		error = dma_map_sg(dev, req->src,
2621 				   sg_nents_for_len(req->src, src_len),
2622 				   DMA_TO_DEVICE);
2623 		if (!error)
2624 			goto err;
2625 		error = dma_map_sg(dev, req->dst,
2626 				   sg_nents_for_len(req->dst, dst_len),
2627 				   DMA_FROM_DEVICE);
2628 		if (!error) {
2629 			dma_unmap_sg(dev, req->src,
2630 				     sg_nents_for_len(req->src, src_len),
2631 				     DMA_TO_DEVICE);
2632 			goto err;
2633 		}
2634 	}
2635 
2636 	return 0;
2637 err:
2638 	dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2639 	return -ENOMEM;
2640 }
2641 
2642 void chcr_aead_dma_unmap(struct device *dev,
2643 			 struct aead_request *req,
2644 			 unsigned short op_type)
2645 {
2646 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2647 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2648 	unsigned int authsize = crypto_aead_authsize(tfm);
2649 	int src_len, dst_len;
2650 
2651 	/* Calculate and handle the src and dst sg lengths separately for
2652 	 * in-place and out-of-place operations.
2653 	 */
2654 	if (req->src == req->dst) {
2655 		src_len = req->assoclen + req->cryptlen + (op_type ?
2656 							0 : authsize);
2657 		dst_len = src_len;
2658 	} else {
2659 		src_len = req->assoclen + req->cryptlen;
2660 		dst_len = req->assoclen + req->cryptlen + (op_type ?
2661 						-authsize : authsize);
2662 	}
2663 
2664 	if (!req->cryptlen || !src_len || !dst_len)
2665 		return;
2666 
2667 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2668 					DMA_BIDIRECTIONAL);
2669 	if (req->src == req->dst) {
2670 		dma_unmap_sg(dev, req->src,
2671 			     sg_nents_for_len(req->src, src_len),
2672 			     DMA_BIDIRECTIONAL);
2673 	} else {
2674 		dma_unmap_sg(dev, req->src,
2675 			     sg_nents_for_len(req->src, src_len),
2676 			     DMA_TO_DEVICE);
2677 		dma_unmap_sg(dev, req->dst,
2678 			     sg_nents_for_len(req->dst, dst_len),
2679 			     DMA_FROM_DEVICE);
2680 	}
2681 }
2682 
2683 void chcr_add_aead_src_ent(struct aead_request *req,
2684 			   struct ulptx_sgl *ulptx)
2685 {
2686 	struct ulptx_walk ulp_walk;
2687 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2688 
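	/* Immediate requests copy B0 (if present) and the whole source into
	 * the WR itself; otherwise the source is described by a ULPTX SGL.
	 */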
2689 	if (reqctx->imm) {
2690 		u8 *buf = (u8 *)ulptx;
2691 
2692 		if (reqctx->b0_len) {
2693 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2694 			buf += reqctx->b0_len;
2695 		}
2696 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2697 				   buf, req->cryptlen + req->assoclen, 0);
2698 	} else {
2699 		ulptx_walk_init(&ulp_walk, ulptx);
2700 		if (reqctx->b0_len)
2701 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2702 					    reqctx->b0_dma);
2703 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2704 				  req->assoclen,  0);
2705 		ulptx_walk_end(&ulp_walk);
2706 	}
2707 }
2708 
2709 void chcr_add_aead_dst_ent(struct aead_request *req,
2710 			   struct cpl_rx_phys_dsgl *phys_cpl,
2711 			   unsigned short qid)
2712 {
2713 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2714 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2715 	struct dsgl_walk dsgl_walk;
2716 	unsigned int authsize = crypto_aead_authsize(tfm);
2717 	struct chcr_context *ctx = a_ctx(tfm);
2718 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2719 	u32 temp;
2720 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2721 
2722 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2723 	dsgl_walk_init(&dsgl_walk, phys_cpl);
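	/* The first destination entry maps the IV (plus B0 for CCM) buffer;
	 * the remaining entries cover the data and tag region in req->dst.
	 */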
2724 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2725 	temp = req->assoclen + req->cryptlen +
2726 		(reqctx->op ? -authsize : authsize);
2727 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2728 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2729 }
2730 
2731 void chcr_add_cipher_src_ent(struct skcipher_request *req,
2732 			     void *ulptx,
2733 			     struct  cipher_wr_param *wrparam)
2734 {
2735 	struct ulptx_walk ulp_walk;
2736 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2737 	u8 *buf = ulptx;
2738 
2739 	memcpy(buf, reqctx->iv, IV);
2740 	buf += IV;
2741 	if (reqctx->imm) {
2742 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2743 				   buf, wrparam->bytes, reqctx->processed);
2744 	} else {
2745 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2746 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2747 				  reqctx->src_ofst);
2748 		reqctx->srcsg = ulp_walk.last_sg;
2749 		reqctx->src_ofst = ulp_walk.last_sg_len;
2750 		ulptx_walk_end(&ulp_walk);
2751 	}
2752 }
2753 
2754 void chcr_add_cipher_dst_ent(struct skcipher_request *req,
2755 			     struct cpl_rx_phys_dsgl *phys_cpl,
2756 			     struct  cipher_wr_param *wrparam,
2757 			     unsigned short qid)
2758 {
2759 	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
2760 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
2761 	struct chcr_context *ctx = c_ctx(tfm);
2762 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2763 	struct dsgl_walk dsgl_walk;
2764 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2765 
2766 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2767 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2768 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2769 			 reqctx->dst_ofst);
2770 	reqctx->dstsg = dsgl_walk.last_sg;
2771 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2772 	dsgl_walk_end(&dsgl_walk, qid, rx_channel_id);
2773 }
2774 
2775 void chcr_add_hash_src_ent(struct ahash_request *req,
2776 			   struct ulptx_sgl *ulptx,
2777 			   struct hash_wr_param *param)
2778 {
2779 	struct ulptx_walk ulp_walk;
2780 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2781 
2782 	if (reqctx->hctx_wr.imm) {
2783 		u8 *buf = (u8 *)ulptx;
2784 
2785 		if (param->bfr_len) {
2786 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2787 			buf += param->bfr_len;
2788 		}
2789 
2790 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2791 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2792 				   param->sg_len, 0);
2793 	} else {
2794 		ulptx_walk_init(&ulp_walk, ulptx);
2795 		if (param->bfr_len)
2796 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2797 					    reqctx->hctx_wr.dma_addr);
2798 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2799 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2800 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2801 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2802 		ulptx_walk_end(&ulp_walk);
2803 	}
2804 }
2805 
2806 int chcr_hash_dma_map(struct device *dev,
2807 		      struct ahash_request *req)
2808 {
2809 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2810 	int error = 0;
2811 
2812 	if (!req->nbytes)
2813 		return 0;
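	/* dma_map_sg() returns the number of mapped entries; zero means the
	 * mapping failed.
	 */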
2814 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2815 			   DMA_TO_DEVICE);
2816 	if (!error)
2817 		return -ENOMEM;
2818 	req_ctx->hctx_wr.is_sg_map = 1;
2819 	return 0;
2820 }
2821 
2822 void chcr_hash_dma_unmap(struct device *dev,
2823 			 struct ahash_request *req)
2824 {
2825 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2826 
2827 	if (!req->nbytes)
2828 		return;
2829 
2830 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2831 			   DMA_TO_DEVICE);
2832 	req_ctx->hctx_wr.is_sg_map = 0;
2833 
2834 }
2835 
2836 int chcr_cipher_dma_map(struct device *dev,
2837 			struct skcipher_request *req)
2838 {
2839 	int error;
2840 
2841 	if (req->src == req->dst) {
2842 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2843 				   DMA_BIDIRECTIONAL);
2844 		if (!error)
2845 			goto err;
2846 	} else {
2847 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2848 				   DMA_TO_DEVICE);
2849 		if (!error)
2850 			goto err;
2851 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2852 				   DMA_FROM_DEVICE);
2853 		if (!error) {
2854 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2855 				   DMA_TO_DEVICE);
2856 			goto err;
2857 		}
2858 	}
2859 
2860 	return 0;
2861 err:
2862 	return -ENOMEM;
2863 }
2864 
2865 void chcr_cipher_dma_unmap(struct device *dev,
2866 			   struct skcipher_request *req)
2867 {
2868 	if (req->src == req->dst) {
2869 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2870 				   DMA_BIDIRECTIONAL);
2871 	} else {
2872 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2873 				   DMA_TO_DEVICE);
2874 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2875 				   DMA_FROM_DEVICE);
2876 	}
2877 }
2878 
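/* Encode msglen as a big-endian value in the trailing bytes of the csize-byte
 * CCM length field, zero-padding the front; returns -EOVERFLOW if the length
 * does not fit.
 */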
2879 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2880 {
2881 	__be32 data;
2882 
2883 	memset(block, 0, csize);
2884 	block += csize;
2885 
2886 	if (csize >= 4)
2887 		csize = 4;
2888 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2889 		return -EOVERFLOW;
2890 
2891 	data = cpu_to_be32(msglen);
2892 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2893 
2894 	return 0;
2895 }
2896 
2897 static int generate_b0(struct aead_request *req, u8 *ivptr,
2898 			unsigned short op_type)
2899 {
2900 	unsigned int l, lp, m;
2901 	int rc;
2902 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2903 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2904 	u8 *b0 = reqctx->scratch_pad;
2905 
2906 	m = crypto_aead_authsize(aead);
2907 
2908 	memcpy(b0, ivptr, 16);
2909 
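	/* The low 3 bits of the flags byte carry L' = L - 1; the length field
	 * occupies the last L bytes of B0 (RFC 3610).
	 */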
2910 	lp = b0[0];
2911 	l = lp + 1;
2912 
2913 	/* set m, bits 3-5 */
2914 	*b0 |= (8 * ((m - 2) / 2));
2915 
2916 	/* set adata, bit 6, if associated data is used */
2917 	if (req->assoclen)
2918 		*b0 |= 64;
2919 	rc = set_msg_len(b0 + 16 - l,
2920 			 (op_type == CHCR_DECRYPT_OP) ?
2921 			 req->cryptlen - m : req->cryptlen, l);
2922 
2923 	return rc;
2924 }
2925 
2926 static inline int crypto_ccm_check_iv(const u8 *iv)
2927 {
2928 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2929 	if (iv[0] < 1 || iv[0] > 7)
2930 		return -EINVAL;
2931 
2932 	return 0;
2933 }
2934 
2935 static int ccm_format_packet(struct aead_request *req,
2936 			     u8 *ivptr,
2937 			     unsigned int sub_type,
2938 			     unsigned short op_type,
2939 			     unsigned int assoclen)
2940 {
2941 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2942 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2943 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2944 	int rc = 0;
2945 
2946 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2947 		ivptr[0] = 3;
2948 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2949 		memcpy(ivptr + 4, req->iv, 8);
2950 		memset(ivptr + 12, 0, 4);
2951 	} else {
2952 		memcpy(ivptr, req->iv, 16);
2953 	}
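	/* A non-zero AAD length is encoded as a 16-bit big-endian value at the
	 * start of the AAD area, directly after B0 in the scratch pad.
	 */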
2954 	if (assoclen)
2955 		put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]);
2956 
2957 	rc = generate_b0(req, ivptr, op_type);
2958 	/* zero the ctr value */
2959 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2960 	return rc;
2961 }
2962 
2963 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2964 				  unsigned int dst_size,
2965 				  struct aead_request *req,
2966 				  unsigned short op_type)
2967 {
2968 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2969 	struct chcr_context *ctx = a_ctx(tfm);
2970 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
2971 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
2972 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2973 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2974 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2975 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
2976 	unsigned int ccm_xtra;
2977 	unsigned int tag_offset = 0, auth_offset = 0;
2978 	unsigned int assoclen;
2979 
2980 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
2981 
2982 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2983 		assoclen = req->assoclen - 8;
2984 	else
2985 		assoclen = req->assoclen;
2986 	ccm_xtra = CCM_B0_SIZE +
2987 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2988 
2989 	auth_offset = req->cryptlen ?
2990 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2991 	if (op_type == CHCR_DECRYPT_OP) {
2992 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2993 			tag_offset = crypto_aead_authsize(tfm);
2994 		else
2995 			auth_offset = 0;
2996 	}
2997 
2998 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1);
2999 	sec_cpl->pldlen =
3000 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
3001 	/* For CCM there will always be a B0 block, so AAD start is always 1 */
3002 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3003 				1 + IV,	IV + assoclen + ccm_xtra,
3004 				req->assoclen + IV + 1 + ccm_xtra, 0);
3005 
3006 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
3007 					auth_offset, tag_offset,
3008 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
3009 					crypto_aead_authsize(tfm));
3010 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
3011 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
3012 					cipher_mode, mac_mode,
3013 					aeadctx->hmac_ctrl, IV >> 1);
3014 
3015 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
3016 					0, dst_size);
3017 }
3018 
3019 static int aead_ccm_validate_input(unsigned short op_type,
3020 				   struct aead_request *req,
3021 				   struct chcr_aead_ctx *aeadctx,
3022 				   unsigned int sub_type)
3023 {
3024 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
3025 		if (crypto_ccm_check_iv(req->iv)) {
3026 			pr_err("CCM: IV check fails\n");
3027 			return -EINVAL;
3028 		}
3029 	} else {
3030 		if (req->assoclen != 16 && req->assoclen != 20) {
3031 			pr_err("RFC4309: Invalid AAD length %d\n",
3032 			       req->assoclen);
3033 			return -EINVAL;
3034 		}
3035 	}
3036 	return 0;
3037 }
3038 
3039 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
3040 					  unsigned short qid,
3041 					  int size)
3042 {
3043 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3044 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3045 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3046 	struct sk_buff *skb = NULL;
3047 	struct chcr_wr *chcr_req;
3048 	struct cpl_rx_phys_dsgl *phys_cpl;
3049 	struct ulptx_sgl *ulptx;
3050 	unsigned int transhdr_len;
3051 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
3052 	unsigned int sub_type, assoclen = req->assoclen;
3053 	unsigned int authsize = crypto_aead_authsize(tfm);
3054 	int error = -EINVAL;
3055 	u8 *ivptr;
3056 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3057 		GFP_ATOMIC;
3058 	struct adapter *adap = padap(a_ctx(tfm)->dev);
3059 
3060 	sub_type = get_aead_subtype(tfm);
3061 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
3062 		assoclen -= 8;
3063 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
3064 	error = chcr_aead_common_init(req);
3065 	if (error)
3066 		return ERR_PTR(error);
3067 
3068 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
3069 	if (error)
3070 		goto err;
3071 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
3072 			+ (reqctx->op ? -authsize : authsize),
3073 			CHCR_DST_SG_SIZE, 0);
3074 	dnents += MIN_CCM_SG; // For IV and B0
3075 	dst_size = get_space_for_phys_dsgl(dnents);
3076 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3077 			       CHCR_SRC_SG_SIZE, 0);
3078 	snents += MIN_CCM_SG; //For B0
3079 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
3080 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3081 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
3082 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
3083 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
3084 				     reqctx->b0_len, 16) :
3085 		(sgl_len(snents) *  8);
3086 	transhdr_len += temp;
3087 	transhdr_len = roundup(transhdr_len, 16);
3088 
3089 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
3090 				reqctx->b0_len, transhdr_len, reqctx->op)) {
3091 		atomic_inc(&adap->chcr_stats.fallback);
3092 		chcr_aead_common_exit(req);
3093 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3094 	}
3095 	skb = alloc_skb(transhdr_len,  flags);
3096 
3097 	if (!skb) {
3098 		error = -ENOMEM;
3099 		goto err;
3100 	}
3101 
3102 	chcr_req = __skb_put_zero(skb, transhdr_len);
3103 
3104 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
3105 
3106 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3107 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3108 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3109 			aeadctx->key, aeadctx->enckey_len);
3110 
3111 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3112 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3113 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
3114 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
3115 	if (error)
3116 		goto dstmap_fail;
3117 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3118 	chcr_add_aead_src_ent(req, ulptx);
3119 
3120 	atomic_inc(&adap->chcr_stats.aead_rqst);
3121 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3122 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
3123 		reqctx->b0_len) : 0);
3124 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
3125 		    transhdr_len, temp, 0);
3126 	reqctx->skb = skb;
3127 
3128 	return skb;
3129 dstmap_fail:
3130 	kfree_skb(skb);
3131 err:
3132 	chcr_aead_common_exit(req);
3133 	return ERR_PTR(error);
3134 }
3135 
3136 static struct sk_buff *create_gcm_wr(struct aead_request *req,
3137 				     unsigned short qid,
3138 				     int size)
3139 {
3140 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3141 	struct chcr_context *ctx = a_ctx(tfm);
3142 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3143 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3144 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3145 	struct sk_buff *skb = NULL;
3146 	struct chcr_wr *chcr_req;
3147 	struct cpl_rx_phys_dsgl *phys_cpl;
3148 	struct ulptx_sgl *ulptx;
3149 	unsigned int transhdr_len, dnents = 0, snents;
3150 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
3151 	unsigned int authsize = crypto_aead_authsize(tfm);
3152 	int error = -EINVAL;
3153 	u8 *ivptr;
3154 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
3155 		GFP_ATOMIC;
3156 	struct adapter *adap = padap(ctx->dev);
3157 	unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
3158 
3159 	rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
3160 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3161 		assoclen = req->assoclen - 8;
3162 
3163 	reqctx->b0_len = 0;
3164 	error = chcr_aead_common_init(req);
3165 	if (error)
3166 		return ERR_PTR(error);
3167 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3168 				(reqctx->op ? -authsize : authsize),
3169 				CHCR_DST_SG_SIZE, 0);
3170 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3171 			       CHCR_SRC_SG_SIZE, 0);
3172 	dnents += MIN_GCM_SG; // For IV
3173 	dst_size = get_space_for_phys_dsgl(dnents);
3174 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3175 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3176 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3177 			SGE_MAX_WR_LEN;
3178 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3179 		(sgl_len(snents) * 8);
3180 	transhdr_len += temp;
3181 	transhdr_len = roundup(transhdr_len, 16);
3182 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3183 			    transhdr_len, reqctx->op)) {
3184 
3185 		atomic_inc(&adap->chcr_stats.fallback);
3186 		chcr_aead_common_exit(req);
3187 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3188 	}
3189 	skb = alloc_skb(transhdr_len, flags);
3190 	if (!skb) {
3191 		error = -ENOMEM;
3192 		goto err;
3193 	}
3194 
3195 	chcr_req = __skb_put_zero(skb, transhdr_len);
3196 
3197 	//Offset of tag from end
3198 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3199 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3200 						rx_channel_id, 2, 1);
3201 	chcr_req->sec_cpl.pldlen =
3202 		htonl(req->assoclen + IV + req->cryptlen);
3203 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3204 					assoclen ? 1 + IV : 0,
3205 					assoclen ? IV + assoclen : 0,
3206 					req->assoclen + IV + 1, 0);
3207 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3208 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3209 						temp, temp);
3210 	chcr_req->sec_cpl.seqno_numivs =
3211 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3212 					CHCR_ENCRYPT_OP) ? 1 : 0,
3213 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3214 					CHCR_SCMD_AUTH_MODE_GHASH,
3215 					aeadctx->hmac_ctrl, IV >> 1);
3216 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3217 					0, 0, dst_size);
3218 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3219 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3220 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3221 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3222 
3223 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3224 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3225 	/* Prepare a 16-byte IV: */
3226 	/* SALT | IV | 0x00000001 */
3227 	if (get_aead_subtype(tfm) ==
3228 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3229 		memcpy(ivptr, aeadctx->salt, 4);
3230 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3231 	} else {
3232 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3233 	}
3234 	put_unaligned_be32(0x01, &ivptr[12]);
3235 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3236 
3237 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3238 	chcr_add_aead_src_ent(req, ulptx);
3239 	atomic_inc(&adap->chcr_stats.aead_rqst);
3240 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3241 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3242 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3243 		    transhdr_len, temp, reqctx->verify);
3244 	reqctx->skb = skb;
3245 	return skb;
3246 
3247 err:
3248 	chcr_aead_common_exit(req);
3249 	return ERR_PTR(error);
3250 }
3251 
3252 
3253 
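/*
 * TFM init/exit: allocate a software AEAD as a fallback for requests the
 * hardware cannot handle, and size the request context so it can hold
 * either the driver's own state or a fallback sub-request.
 */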
3254 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3255 {
3256 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3257 	struct aead_alg *alg = crypto_aead_alg(tfm);
3258 
3259 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3260 					       CRYPTO_ALG_NEED_FALLBACK |
3261 					       CRYPTO_ALG_ASYNC);
3262 	if  (IS_ERR(aeadctx->sw_cipher))
3263 		return PTR_ERR(aeadctx->sw_cipher);
3264 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3265 				 sizeof(struct aead_request) +
3266 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3267 	return chcr_device_init(a_ctx(tfm));
3268 }
3269 
3270 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3271 {
3272 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3273 
3274 	crypto_free_aead(aeadctx->sw_cipher);
3275 }
3276 
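/*
 * The setauthsize handlers below map the requested ICV length onto the
 * hardware HMAC-control encoding; lengths the hardware cannot produce
 * directly are verified in software (VERIFY_SW) on the decrypt path.
 */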
3277 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3278 					unsigned int authsize)
3279 {
3280 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3281 
3282 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3283 	aeadctx->mayverify = VERIFY_HW;
3284 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3285 }
3286 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3287 				    unsigned int authsize)
3288 {
3289 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3290 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3291 
3292 	/* For SHA1 the IPsec authsize is 12, not 10, i.e. authsize == maxauthsize / 2
3293 	 * does not hold. The authsize == 12 check must therefore come before the
3294 	 * authsize == (maxauth >> 1) check.
3295 	 */
3296 	if (authsize == ICV_4) {
3297 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3298 		aeadctx->mayverify = VERIFY_HW;
3299 	} else if (authsize == ICV_6) {
3300 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3301 		aeadctx->mayverify = VERIFY_HW;
3302 	} else if (authsize == ICV_10) {
3303 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3304 		aeadctx->mayverify = VERIFY_HW;
3305 	} else if (authsize == ICV_12) {
3306 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3307 		aeadctx->mayverify = VERIFY_HW;
3308 	} else if (authsize == ICV_14) {
3309 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3310 		aeadctx->mayverify = VERIFY_HW;
3311 	} else if (authsize == (maxauth >> 1)) {
3312 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3313 		aeadctx->mayverify = VERIFY_HW;
3314 	} else if (authsize == maxauth) {
3315 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3316 		aeadctx->mayverify = VERIFY_HW;
3317 	} else {
3318 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3319 		aeadctx->mayverify = VERIFY_SW;
3320 	}
3321 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3322 }
3323 
3324 
3325 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3326 {
3327 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3328 
3329 	switch (authsize) {
3330 	case ICV_4:
3331 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3332 		aeadctx->mayverify = VERIFY_HW;
3333 		break;
3334 	case ICV_8:
3335 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3336 		aeadctx->mayverify = VERIFY_HW;
3337 		break;
3338 	case ICV_12:
3339 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3340 		aeadctx->mayverify = VERIFY_HW;
3341 		break;
3342 	case ICV_14:
3343 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3344 		aeadctx->mayverify = VERIFY_HW;
3345 		break;
3346 	case ICV_16:
3347 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3348 		aeadctx->mayverify = VERIFY_HW;
3349 		break;
3350 	case ICV_13:
3351 	case ICV_15:
3352 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3353 		aeadctx->mayverify = VERIFY_SW;
3354 		break;
3355 	default:
3356 		return -EINVAL;
3357 	}
3358 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3359 }
3360 
3361 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3362 					  unsigned int authsize)
3363 {
3364 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3365 
3366 	switch (authsize) {
3367 	case ICV_8:
3368 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3369 		aeadctx->mayverify = VERIFY_HW;
3370 		break;
3371 	case ICV_12:
3372 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3373 		aeadctx->mayverify = VERIFY_HW;
3374 		break;
3375 	case ICV_16:
3376 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3377 		aeadctx->mayverify = VERIFY_HW;
3378 		break;
3379 	default:
3380 		return -EINVAL;
3381 	}
3382 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3383 }
3384 
3385 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3386 				unsigned int authsize)
3387 {
3388 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3389 
3390 	switch (authsize) {
3391 	case ICV_4:
3392 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3393 		aeadctx->mayverify = VERIFY_HW;
3394 		break;
3395 	case ICV_6:
3396 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3397 		aeadctx->mayverify = VERIFY_HW;
3398 		break;
3399 	case ICV_8:
3400 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3401 		aeadctx->mayverify = VERIFY_HW;
3402 		break;
3403 	case ICV_10:
3404 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3405 		aeadctx->mayverify = VERIFY_HW;
3406 		break;
3407 	case ICV_12:
3408 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3409 		aeadctx->mayverify = VERIFY_HW;
3410 		break;
3411 	case ICV_14:
3412 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3413 		aeadctx->mayverify = VERIFY_HW;
3414 		break;
3415 	case ICV_16:
3416 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3417 		aeadctx->mayverify = VERIFY_HW;
3418 		break;
3419 	default:
3420 		return -EINVAL;
3421 	}
3422 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3423 }
3424 
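/*
 * Store the AES key for CCM and build the key-context header, which
 * encodes the cipher/MAC key sizes and the key-context length in
 * 16-byte units.
 */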
3425 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3426 				const u8 *key,
3427 				unsigned int keylen)
3428 {
3429 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3430 	unsigned char ck_size, mk_size;
3431 	int key_ctx_size = 0;
3432 
3433 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3434 	if (keylen == AES_KEYSIZE_128) {
3435 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3436 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3437 	} else if (keylen == AES_KEYSIZE_192) {
3438 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3439 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3440 	} else if (keylen == AES_KEYSIZE_256) {
3441 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3442 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3443 	} else {
3444 		aeadctx->enckey_len = 0;
3445 		return	-EINVAL;
3446 	}
3447 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3448 						key_ctx_size >> 4);
3449 	memcpy(aeadctx->key, key, keylen);
3450 	aeadctx->enckey_len = keylen;
3451 
3452 	return 0;
3453 }
3454 
3455 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3456 				const u8 *key,
3457 				unsigned int keylen)
3458 {
3459 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3460 	int error;
3461 
3462 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3463 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3464 			      CRYPTO_TFM_REQ_MASK);
3465 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3466 	if (error)
3467 		return error;
3468 	return chcr_ccm_common_setkey(aead, key, keylen);
3469 }
3470 
3471 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3472 				    unsigned int keylen)
3473 {
3474 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3475 	int error;
3476 
3477 	if (keylen < 3) {
3478 		aeadctx->enckey_len = 0;
3479 		return	-EINVAL;
3480 	}
3481 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3482 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3483 			      CRYPTO_TFM_REQ_MASK);
3484 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3485 	if (error)
3486 		return error;
3487 	keylen -= 3;
3488 	memcpy(aeadctx->salt, key + keylen, 3);
3489 	return chcr_ccm_common_setkey(aead, key, keylen);
3490 }
3491 
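/*
 * GCM setkey: program the software fallback, strip the 4-byte nonce for
 * RFC4106, store the AES key and precompute the GHASH subkey
 * H = E_K(0^128) that is copied into the key context.
 */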
3492 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3493 			   unsigned int keylen)
3494 {
3495 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3496 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3497 	unsigned int ck_size;
3498 	int ret = 0, key_ctx_size = 0;
3499 	struct crypto_aes_ctx aes;
3500 
3501 	aeadctx->enckey_len = 0;
3502 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3503 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3504 			      & CRYPTO_TFM_REQ_MASK);
3505 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3506 	if (ret)
3507 		goto out;
3508 
3509 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3510 	    keylen > 3) {
3511 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3512 		memcpy(aeadctx->salt, key + keylen, 4);
3513 	}
3514 	if (keylen == AES_KEYSIZE_128) {
3515 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3516 	} else if (keylen == AES_KEYSIZE_192) {
3517 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3518 	} else if (keylen == AES_KEYSIZE_256) {
3519 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3520 	} else {
3521 		pr_err("GCM: Invalid key length %d\n", keylen);
3522 		ret = -EINVAL;
3523 		goto out;
3524 	}
3525 
3526 	memcpy(aeadctx->key, key, keylen);
3527 	aeadctx->enckey_len = keylen;
3528 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3529 		AEAD_H_SIZE;
3530 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3531 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3532 						0, 0,
3533 						key_ctx_size >> 4);
3534 	/* Calculate H = CIPH(K, 0 repeated 16 times), the GHASH subkey.
3535 	 * It is placed in the key context after the cipher key.
3536 	 */
3537 	ret = aes_expandkey(&aes, key, keylen);
3538 	if (ret) {
3539 		aeadctx->enckey_len = 0;
3540 		goto out;
3541 	}
3542 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3543 	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3544 	memzero_explicit(&aes, sizeof(aes));
3545 
3546 out:
3547 	return ret;
3548 }
3549 
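/*
 * authenc setkey: split the combined key with crypto_authenc_extractkeys(),
 * keep only the cipher key, and precompute the ipad/opad partial hash
 * states that are placed in the key context.
 */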
3550 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3551 				   unsigned int keylen)
3552 {
3553 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3554 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3555 	/* keys holds both the authentication key and the cipher key */
3556 	struct crypto_authenc_keys keys;
3557 	unsigned int bs, subtype;
3558 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3559 	int err = 0, i, key_ctx_len = 0;
3560 	unsigned char ck_size = 0;
3561 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3562 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3563 	struct algo_param param;
3564 	int align;
3565 	u8 *o_ptr = NULL;
3566 
3567 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3568 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3569 			      & CRYPTO_TFM_REQ_MASK);
3570 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3571 	if (err)
3572 		goto out;
3573 
3574 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3575 		goto out;
3576 
3577 	if (get_alg_config(&param, max_authsize)) {
3578 		pr_err("Unsupported digest size\n");
3579 		goto out;
3580 	}
3581 	subtype = get_aead_subtype(authenc);
3582 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3583 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3584 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3585 			goto out;
3586 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3587 		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3588 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3589 	}
3590 	if (keys.enckeylen == AES_KEYSIZE_128) {
3591 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3592 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3593 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3594 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3595 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3596 	} else {
3597 		pr_err("Unsupported cipher key\n");
3598 		goto out;
3599 	}
3600 
3601 	/* Copy only the encryption key. The authentication key is used here
3602 	 * only to generate h(ipad) and h(opad), so it is not stored; only
3603 	 * digests of the hash digest size go into the key context.
3604 	 */
3605 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3606 	aeadctx->enckey_len = keys.enckeylen;
3607 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3608 		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3609 
3610 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3611 			    aeadctx->enckey_len << 3);
3612 	}
3613 	base_hash  = chcr_alloc_shash(max_authsize);
3614 	if (IS_ERR(base_hash)) {
3615 		pr_err("Base driver cannot be loaded\n");
3616 		goto out;
3617 	}
3618 	{
3619 		SHASH_DESC_ON_STACK(shash, base_hash);
3620 
3621 		shash->tfm = base_hash;
3622 		bs = crypto_shash_blocksize(base_hash);
3623 		align = KEYCTX_ALIGN_PAD(max_authsize);
3624 		o_ptr =  actx->h_iopad + param.result_size + align;
3625 
3626 		if (keys.authkeylen > bs) {
3627 			err = crypto_shash_digest(shash, keys.authkey,
3628 						  keys.authkeylen,
3629 						  o_ptr);
3630 			if (err) {
3631 				pr_err("Base driver cannot be loaded\n");
3632 				goto out;
3633 			}
3634 			keys.authkeylen = max_authsize;
3635 		} else
3636 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3637 
3638 		/* Compute the ipad-digest */
3639 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3640 		memcpy(pad, o_ptr, keys.authkeylen);
3641 		for (i = 0; i < bs >> 2; i++)
3642 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3643 
3644 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3645 					      max_authsize))
3646 			goto out;
3647 		/* Compute the opad-digest */
3648 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3649 		memcpy(pad, o_ptr, keys.authkeylen);
3650 		for (i = 0; i < bs >> 2; i++)
3651 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3652 
3653 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3654 			goto out;
3655 
3656 		/* convert the ipad and opad digest to network order */
3657 		chcr_change_order(actx->h_iopad, param.result_size);
3658 		chcr_change_order(o_ptr, param.result_size);
3659 		key_ctx_len = sizeof(struct _key_ctx) +
3660 			roundup(keys.enckeylen, 16) +
3661 			(param.result_size + align) * 2;
3662 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3663 						0, 1, key_ctx_len >> 4);
3664 		actx->auth_mode = param.auth_mode;
3665 		chcr_free_shash(base_hash);
3666 
3667 		memzero_explicit(&keys, sizeof(keys));
3668 		return 0;
3669 	}
3670 out:
3671 	aeadctx->enckey_len = 0;
3672 	memzero_explicit(&keys, sizeof(keys));
3673 	if (!IS_ERR(base_hash))
3674 		chcr_free_shash(base_hash);
3675 	return -EINVAL;
3676 }
3677 
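/*
 * Setkey for the authenc(digest_null,...) variants: only the cipher key is
 * programmed and the auth mode is set to NOP, so no digest is computed.
 */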
3678 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3679 					const u8 *key, unsigned int keylen)
3680 {
3681 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3682 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3683 	struct crypto_authenc_keys keys;
3684 	int err;
3685 	/* keys holds both the authentication key and the cipher key */
3686 	unsigned int subtype;
3687 	int key_ctx_len = 0;
3688 	unsigned char ck_size = 0;
3689 
3690 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3691 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3692 			      & CRYPTO_TFM_REQ_MASK);
3693 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3694 	if (err)
3695 		goto out;
3696 
3697 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
3698 		goto out;
3699 
3700 	subtype = get_aead_subtype(authenc);
3701 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3702 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3703 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3704 			goto out;
3705 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3706 			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3707 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3708 	}
3709 	if (keys.enckeylen == AES_KEYSIZE_128) {
3710 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3711 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3712 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3713 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3714 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3715 	} else {
3716 		pr_err("Unsupported cipher key %d\n", keys.enckeylen);
3717 		goto out;
3718 	}
3719 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3720 	aeadctx->enckey_len = keys.enckeylen;
3721 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3722 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3723 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3724 				aeadctx->enckey_len << 3);
3725 	}
3726 	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3727 
3728 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3729 						0, key_ctx_len >> 4);
3730 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3731 	memzero_explicit(&keys, sizeof(keys));
3732 	return 0;
3733 out:
3734 	aeadctx->enckey_len = 0;
3735 	memzero_explicit(&keys, sizeof(keys));
3736 	return -EINVAL;
3737 }
3738 
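/*
 * Common AEAD submission path: check device state and queue occupancy,
 * build a work request with the subtype-specific create_wr_fn and hand the
 * resulting skb to the LLD; completion is reported asynchronously.
 */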
3739 static int chcr_aead_op(struct aead_request *req,
3740 			int size,
3741 			create_wr_t create_wr_fn)
3742 {
3743 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3744 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3745 	struct chcr_context *ctx = a_ctx(tfm);
3746 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
3747 	struct sk_buff *skb;
3748 	struct chcr_dev *cdev;
3749 
3750 	cdev = a_ctx(tfm)->dev;
3751 	if (!cdev) {
3752 		pr_err("%s : No crypto device.\n", __func__);
3753 		return -ENXIO;
3754 	}
3755 
3756 	if (chcr_inc_wrcount(cdev)) {
3757 		/* A detached CHCR state means lldi or padap has been freed,
3758 		 * so the fallback stats counter cannot be incremented here.
3759 		 */
3760 		return chcr_aead_fallback(req, reqctx->op);
3761 	}
3762 
3763 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3764 					reqctx->txqidx) &&
3765 		(!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
3766 			chcr_dec_wrcount(cdev);
3767 			return -ENOSPC;
3768 	}
3769 
3770 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3771 	    crypto_ipsec_check_assoclen(req->assoclen) != 0) {
3772 		pr_err("RFC4106: Invalid value of assoclen %d\n",
3773 		       req->assoclen);
3774 		return -EINVAL;
3775 	}
3776 
3777 	/* Form a WR from req */
3778 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size);
3779 
3780 	if (IS_ERR_OR_NULL(skb)) {
3781 		chcr_dec_wrcount(cdev);
3782 		return PTR_ERR_OR_ZERO(skb);
3783 	}
3784 
3785 	skb->dev = u_ctx->lldi.ports[0];
3786 	set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx);
3787 	chcr_send_wr(skb);
3788 	return -EINPROGRESS;
3789 }
3790 
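/*
 * Crypto API entry points: pick tx/rx queues based on the current CPU and
 * dispatch to the work-request builder that matches the AEAD subtype
 * (authenc, CCM/RFC4309 or GCM/RFC4106).
 */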
3791 static int chcr_aead_encrypt(struct aead_request *req)
3792 {
3793 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3794 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3795 	struct chcr_context *ctx = a_ctx(tfm);
3796 	unsigned int cpu;
3797 
3798 	cpu = get_cpu();
3799 	reqctx->txqidx = cpu % ctx->ntxq;
3800 	reqctx->rxqidx = cpu % ctx->nrxq;
3801 	put_cpu();
3802 
3803 	reqctx->verify = VERIFY_HW;
3804 	reqctx->op = CHCR_ENCRYPT_OP;
3805 
3806 	switch (get_aead_subtype(tfm)) {
3807 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3808 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3809 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3810 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3811 		return chcr_aead_op(req, 0, create_authenc_wr);
3812 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3813 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3814 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3815 	default:
3816 		return chcr_aead_op(req, 0, create_gcm_wr);
3817 	}
3818 }
3819 
3820 static int chcr_aead_decrypt(struct aead_request *req)
3821 {
3822 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3823 	struct chcr_context *ctx = a_ctx(tfm);
3824 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
3825 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3826 	int size;
3827 	unsigned int cpu;
3828 
3829 	cpu = get_cpu();
3830 	reqctx->txqidx = cpu % ctx->ntxq;
3831 	reqctx->rxqidx = cpu % ctx->nrxq;
3832 	put_cpu();
3833 
3834 	if (aeadctx->mayverify == VERIFY_SW) {
3835 		size = crypto_aead_maxauthsize(tfm);
3836 		reqctx->verify = VERIFY_SW;
3837 	} else {
3838 		size = 0;
3839 		reqctx->verify = VERIFY_HW;
3840 	}
3841 	reqctx->op = CHCR_DECRYPT_OP;
3842 	switch (get_aead_subtype(tfm)) {
3843 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3844 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3845 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3846 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3847 		return chcr_aead_op(req, size, create_authenc_wr);
3848 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3849 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3850 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3851 	default:
3852 		return chcr_aead_op(req, size, create_gcm_wr);
3853 	}
3854 }
3855 
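/*
 * Template table of every cipher, hash, HMAC and AEAD algorithm this
 * driver offers; chcr_register_alg() fills in the common callbacks and
 * registers each entry with the crypto API.
 */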
3856 static struct chcr_alg_template driver_algs[] = {
3857 	/* AES-CBC */
3858 	{
3859 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3860 		.is_registered = 0,
3861 		.alg.skcipher = {
3862 			.base.cra_name		= "cbc(aes)",
3863 			.base.cra_driver_name	= "cbc-aes-chcr",
3864 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3865 
3866 			.init			= chcr_init_tfm,
3867 			.exit			= chcr_exit_tfm,
3868 			.min_keysize		= AES_MIN_KEY_SIZE,
3869 			.max_keysize		= AES_MAX_KEY_SIZE,
3870 			.ivsize			= AES_BLOCK_SIZE,
3871 			.setkey			= chcr_aes_cbc_setkey,
3872 			.encrypt		= chcr_aes_encrypt,
3873 			.decrypt		= chcr_aes_decrypt,
3874 			}
3875 	},
3876 	{
3877 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3878 		.is_registered = 0,
3879 		.alg.skcipher = {
3880 			.base.cra_name		= "xts(aes)",
3881 			.base.cra_driver_name	= "xts-aes-chcr",
3882 			.base.cra_blocksize	= AES_BLOCK_SIZE,
3883 
3884 			.init			= chcr_init_tfm,
3885 			.exit			= chcr_exit_tfm,
3886 			.min_keysize		= 2 * AES_MIN_KEY_SIZE,
3887 			.max_keysize		= 2 * AES_MAX_KEY_SIZE,
3888 			.ivsize			= AES_BLOCK_SIZE,
3889 			.setkey			= chcr_aes_xts_setkey,
3890 			.encrypt		= chcr_aes_encrypt,
3891 			.decrypt		= chcr_aes_decrypt,
3892 			}
3893 	},
3894 	{
3895 		.type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3896 		.is_registered = 0,
3897 		.alg.skcipher = {
3898 			.base.cra_name		= "ctr(aes)",
3899 			.base.cra_driver_name	= "ctr-aes-chcr",
3900 			.base.cra_blocksize	= 1,
3901 
3902 			.init			= chcr_init_tfm,
3903 			.exit			= chcr_exit_tfm,
3904 			.min_keysize		= AES_MIN_KEY_SIZE,
3905 			.max_keysize		= AES_MAX_KEY_SIZE,
3906 			.ivsize			= AES_BLOCK_SIZE,
3907 			.setkey			= chcr_aes_ctr_setkey,
3908 			.encrypt		= chcr_aes_encrypt,
3909 			.decrypt		= chcr_aes_decrypt,
3910 		}
3911 	},
3912 	{
3913 		.type = CRYPTO_ALG_TYPE_SKCIPHER |
3914 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3915 		.is_registered = 0,
3916 		.alg.skcipher = {
3917 			.base.cra_name		= "rfc3686(ctr(aes))",
3918 			.base.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3919 			.base.cra_blocksize	= 1,
3920 
3921 			.init			= chcr_rfc3686_init,
3922 			.exit			= chcr_exit_tfm,
3923 			.min_keysize		= AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3924 			.max_keysize		= AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
3925 			.ivsize			= CTR_RFC3686_IV_SIZE,
3926 			.setkey			= chcr_aes_rfc3686_setkey,
3927 			.encrypt		= chcr_aes_encrypt,
3928 			.decrypt		= chcr_aes_decrypt,
3929 		}
3930 	},
3931 	/* SHA */
3932 	{
3933 		.type = CRYPTO_ALG_TYPE_AHASH,
3934 		.is_registered = 0,
3935 		.alg.hash = {
3936 			.halg.digestsize = SHA1_DIGEST_SIZE,
3937 			.halg.base = {
3938 				.cra_name = "sha1",
3939 				.cra_driver_name = "sha1-chcr",
3940 				.cra_blocksize = SHA1_BLOCK_SIZE,
3941 			}
3942 		}
3943 	},
3944 	{
3945 		.type = CRYPTO_ALG_TYPE_AHASH,
3946 		.is_registered = 0,
3947 		.alg.hash = {
3948 			.halg.digestsize = SHA256_DIGEST_SIZE,
3949 			.halg.base = {
3950 				.cra_name = "sha256",
3951 				.cra_driver_name = "sha256-chcr",
3952 				.cra_blocksize = SHA256_BLOCK_SIZE,
3953 			}
3954 		}
3955 	},
3956 	{
3957 		.type = CRYPTO_ALG_TYPE_AHASH,
3958 		.is_registered = 0,
3959 		.alg.hash = {
3960 			.halg.digestsize = SHA224_DIGEST_SIZE,
3961 			.halg.base = {
3962 				.cra_name = "sha224",
3963 				.cra_driver_name = "sha224-chcr",
3964 				.cra_blocksize = SHA224_BLOCK_SIZE,
3965 			}
3966 		}
3967 	},
3968 	{
3969 		.type = CRYPTO_ALG_TYPE_AHASH,
3970 		.is_registered = 0,
3971 		.alg.hash = {
3972 			.halg.digestsize = SHA384_DIGEST_SIZE,
3973 			.halg.base = {
3974 				.cra_name = "sha384",
3975 				.cra_driver_name = "sha384-chcr",
3976 				.cra_blocksize = SHA384_BLOCK_SIZE,
3977 			}
3978 		}
3979 	},
3980 	{
3981 		.type = CRYPTO_ALG_TYPE_AHASH,
3982 		.is_registered = 0,
3983 		.alg.hash = {
3984 			.halg.digestsize = SHA512_DIGEST_SIZE,
3985 			.halg.base = {
3986 				.cra_name = "sha512",
3987 				.cra_driver_name = "sha512-chcr",
3988 				.cra_blocksize = SHA512_BLOCK_SIZE,
3989 			}
3990 		}
3991 	},
3992 	/* HMAC */
3993 	{
3994 		.type = CRYPTO_ALG_TYPE_HMAC,
3995 		.is_registered = 0,
3996 		.alg.hash = {
3997 			.halg.digestsize = SHA1_DIGEST_SIZE,
3998 			.halg.base = {
3999 				.cra_name = "hmac(sha1)",
4000 				.cra_driver_name = "hmac-sha1-chcr",
4001 				.cra_blocksize = SHA1_BLOCK_SIZE,
4002 			}
4003 		}
4004 	},
4005 	{
4006 		.type = CRYPTO_ALG_TYPE_HMAC,
4007 		.is_registered = 0,
4008 		.alg.hash = {
4009 			.halg.digestsize = SHA224_DIGEST_SIZE,
4010 			.halg.base = {
4011 				.cra_name = "hmac(sha224)",
4012 				.cra_driver_name = "hmac-sha224-chcr",
4013 				.cra_blocksize = SHA224_BLOCK_SIZE,
4014 			}
4015 		}
4016 	},
4017 	{
4018 		.type = CRYPTO_ALG_TYPE_HMAC,
4019 		.is_registered = 0,
4020 		.alg.hash = {
4021 			.halg.digestsize = SHA256_DIGEST_SIZE,
4022 			.halg.base = {
4023 				.cra_name = "hmac(sha256)",
4024 				.cra_driver_name = "hmac-sha256-chcr",
4025 				.cra_blocksize = SHA256_BLOCK_SIZE,
4026 			}
4027 		}
4028 	},
4029 	{
4030 		.type = CRYPTO_ALG_TYPE_HMAC,
4031 		.is_registered = 0,
4032 		.alg.hash = {
4033 			.halg.digestsize = SHA384_DIGEST_SIZE,
4034 			.halg.base = {
4035 				.cra_name = "hmac(sha384)",
4036 				.cra_driver_name = "hmac-sha384-chcr",
4037 				.cra_blocksize = SHA384_BLOCK_SIZE,
4038 			}
4039 		}
4040 	},
4041 	{
4042 		.type = CRYPTO_ALG_TYPE_HMAC,
4043 		.is_registered = 0,
4044 		.alg.hash = {
4045 			.halg.digestsize = SHA512_DIGEST_SIZE,
4046 			.halg.base = {
4047 				.cra_name = "hmac(sha512)",
4048 				.cra_driver_name = "hmac-sha512-chcr",
4049 				.cra_blocksize = SHA512_BLOCK_SIZE,
4050 			}
4051 		}
4052 	},
4053 	/* Add AEAD Algorithms */
4054 	{
4055 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
4056 		.is_registered = 0,
4057 		.alg.aead = {
4058 			.base = {
4059 				.cra_name = "gcm(aes)",
4060 				.cra_driver_name = "gcm-aes-chcr",
4061 				.cra_blocksize	= 1,
4062 				.cra_priority = CHCR_AEAD_PRIORITY,
4063 				.cra_ctxsize =	sizeof(struct chcr_context) +
4064 						sizeof(struct chcr_aead_ctx) +
4065 						sizeof(struct chcr_gcm_ctx),
4066 			},
4067 			.ivsize = GCM_AES_IV_SIZE,
4068 			.maxauthsize = GHASH_DIGEST_SIZE,
4069 			.setkey = chcr_gcm_setkey,
4070 			.setauthsize = chcr_gcm_setauthsize,
4071 		}
4072 	},
4073 	{
4074 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
4075 		.is_registered = 0,
4076 		.alg.aead = {
4077 			.base = {
4078 				.cra_name = "rfc4106(gcm(aes))",
4079 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
4080 				.cra_blocksize	 = 1,
4081 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4082 				.cra_ctxsize =	sizeof(struct chcr_context) +
4083 						sizeof(struct chcr_aead_ctx) +
4084 						sizeof(struct chcr_gcm_ctx),
4085 
4086 			},
4087 			.ivsize = GCM_RFC4106_IV_SIZE,
4088 			.maxauthsize	= GHASH_DIGEST_SIZE,
4089 			.setkey = chcr_gcm_setkey,
4090 			.setauthsize	= chcr_4106_4309_setauthsize,
4091 		}
4092 	},
4093 	{
4094 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
4095 		.is_registered = 0,
4096 		.alg.aead = {
4097 			.base = {
4098 				.cra_name = "ccm(aes)",
4099 				.cra_driver_name = "ccm-aes-chcr",
4100 				.cra_blocksize	 = 1,
4101 				.cra_priority = CHCR_AEAD_PRIORITY,
4102 				.cra_ctxsize =	sizeof(struct chcr_context) +
4103 						sizeof(struct chcr_aead_ctx),
4104 
4105 			},
4106 			.ivsize = AES_BLOCK_SIZE,
4107 			.maxauthsize	= GHASH_DIGEST_SIZE,
4108 			.setkey = chcr_aead_ccm_setkey,
4109 			.setauthsize	= chcr_ccm_setauthsize,
4110 		}
4111 	},
4112 	{
4113 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
4114 		.is_registered = 0,
4115 		.alg.aead = {
4116 			.base = {
4117 				.cra_name = "rfc4309(ccm(aes))",
4118 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
4119 				.cra_blocksize	 = 1,
4120 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
4121 				.cra_ctxsize =	sizeof(struct chcr_context) +
4122 						sizeof(struct chcr_aead_ctx),
4123 
4124 			},
4125 			.ivsize = 8,
4126 			.maxauthsize	= GHASH_DIGEST_SIZE,
4127 			.setkey = chcr_aead_rfc4309_setkey,
4128 			.setauthsize = chcr_4106_4309_setauthsize,
4129 		}
4130 	},
4131 	{
4132 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4133 		.is_registered = 0,
4134 		.alg.aead = {
4135 			.base = {
4136 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
4137 				.cra_driver_name =
4138 					"authenc-hmac-sha1-cbc-aes-chcr",
4139 				.cra_blocksize	 = AES_BLOCK_SIZE,
4140 				.cra_priority = CHCR_AEAD_PRIORITY,
4141 				.cra_ctxsize =	sizeof(struct chcr_context) +
4142 						sizeof(struct chcr_aead_ctx) +
4143 						sizeof(struct chcr_authenc_ctx),
4144 
4145 			},
4146 			.ivsize = AES_BLOCK_SIZE,
4147 			.maxauthsize = SHA1_DIGEST_SIZE,
4148 			.setkey = chcr_authenc_setkey,
4149 			.setauthsize = chcr_authenc_setauthsize,
4150 		}
4151 	},
4152 	{
4153 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4154 		.is_registered = 0,
4155 		.alg.aead = {
4156 			.base = {
4157 
4158 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4159 				.cra_driver_name =
4160 					"authenc-hmac-sha256-cbc-aes-chcr",
4161 				.cra_blocksize	 = AES_BLOCK_SIZE,
4162 				.cra_priority = CHCR_AEAD_PRIORITY,
4163 				.cra_ctxsize =	sizeof(struct chcr_context) +
4164 						sizeof(struct chcr_aead_ctx) +
4165 						sizeof(struct chcr_authenc_ctx),
4166 
4167 			},
4168 			.ivsize = AES_BLOCK_SIZE,
4169 			.maxauthsize	= SHA256_DIGEST_SIZE,
4170 			.setkey = chcr_authenc_setkey,
4171 			.setauthsize = chcr_authenc_setauthsize,
4172 		}
4173 	},
4174 	{
4175 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4176 		.is_registered = 0,
4177 		.alg.aead = {
4178 			.base = {
4179 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4180 				.cra_driver_name =
4181 					"authenc-hmac-sha224-cbc-aes-chcr",
4182 				.cra_blocksize	 = AES_BLOCK_SIZE,
4183 				.cra_priority = CHCR_AEAD_PRIORITY,
4184 				.cra_ctxsize =	sizeof(struct chcr_context) +
4185 						sizeof(struct chcr_aead_ctx) +
4186 						sizeof(struct chcr_authenc_ctx),
4187 			},
4188 			.ivsize = AES_BLOCK_SIZE,
4189 			.maxauthsize = SHA224_DIGEST_SIZE,
4190 			.setkey = chcr_authenc_setkey,
4191 			.setauthsize = chcr_authenc_setauthsize,
4192 		}
4193 	},
4194 	{
4195 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4196 		.is_registered = 0,
4197 		.alg.aead = {
4198 			.base = {
4199 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4200 				.cra_driver_name =
4201 					"authenc-hmac-sha384-cbc-aes-chcr",
4202 				.cra_blocksize	 = AES_BLOCK_SIZE,
4203 				.cra_priority = CHCR_AEAD_PRIORITY,
4204 				.cra_ctxsize =	sizeof(struct chcr_context) +
4205 						sizeof(struct chcr_aead_ctx) +
4206 						sizeof(struct chcr_authenc_ctx),
4207 
4208 			},
4209 			.ivsize = AES_BLOCK_SIZE,
4210 			.maxauthsize = SHA384_DIGEST_SIZE,
4211 			.setkey = chcr_authenc_setkey,
4212 			.setauthsize = chcr_authenc_setauthsize,
4213 		}
4214 	},
4215 	{
4216 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4217 		.is_registered = 0,
4218 		.alg.aead = {
4219 			.base = {
4220 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4221 				.cra_driver_name =
4222 					"authenc-hmac-sha512-cbc-aes-chcr",
4223 				.cra_blocksize	 = AES_BLOCK_SIZE,
4224 				.cra_priority = CHCR_AEAD_PRIORITY,
4225 				.cra_ctxsize =	sizeof(struct chcr_context) +
4226 						sizeof(struct chcr_aead_ctx) +
4227 						sizeof(struct chcr_authenc_ctx),
4228 
4229 			},
4230 			.ivsize = AES_BLOCK_SIZE,
4231 			.maxauthsize = SHA512_DIGEST_SIZE,
4232 			.setkey = chcr_authenc_setkey,
4233 			.setauthsize = chcr_authenc_setauthsize,
4234 		}
4235 	},
4236 	{
4237 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4238 		.is_registered = 0,
4239 		.alg.aead = {
4240 			.base = {
4241 				.cra_name = "authenc(digest_null,cbc(aes))",
4242 				.cra_driver_name =
4243 					"authenc-digest_null-cbc-aes-chcr",
4244 				.cra_blocksize	 = AES_BLOCK_SIZE,
4245 				.cra_priority = CHCR_AEAD_PRIORITY,
4246 				.cra_ctxsize =	sizeof(struct chcr_context) +
4247 						sizeof(struct chcr_aead_ctx) +
4248 						sizeof(struct chcr_authenc_ctx),
4249 
4250 			},
4251 			.ivsize  = AES_BLOCK_SIZE,
4252 			.maxauthsize = 0,
4253 			.setkey  = chcr_aead_digest_null_setkey,
4254 			.setauthsize = chcr_authenc_null_setauthsize,
4255 		}
4256 	},
4257 	{
4258 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4259 		.is_registered = 0,
4260 		.alg.aead = {
4261 			.base = {
4262 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4263 				.cra_driver_name =
4264 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4265 				.cra_blocksize	 = 1,
4266 				.cra_priority = CHCR_AEAD_PRIORITY,
4267 				.cra_ctxsize =	sizeof(struct chcr_context) +
4268 						sizeof(struct chcr_aead_ctx) +
4269 						sizeof(struct chcr_authenc_ctx),
4270 
4271 			},
4272 			.ivsize = CTR_RFC3686_IV_SIZE,
4273 			.maxauthsize = SHA1_DIGEST_SIZE,
4274 			.setkey = chcr_authenc_setkey,
4275 			.setauthsize = chcr_authenc_setauthsize,
4276 		}
4277 	},
4278 	{
4279 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4280 		.is_registered = 0,
4281 		.alg.aead = {
4282 			.base = {
4283 
4284 				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4285 				.cra_driver_name =
4286 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4287 				.cra_blocksize	 = 1,
4288 				.cra_priority = CHCR_AEAD_PRIORITY,
4289 				.cra_ctxsize =	sizeof(struct chcr_context) +
4290 						sizeof(struct chcr_aead_ctx) +
4291 						sizeof(struct chcr_authenc_ctx),
4292 
4293 			},
4294 			.ivsize = CTR_RFC3686_IV_SIZE,
4295 			.maxauthsize	= SHA256_DIGEST_SIZE,
4296 			.setkey = chcr_authenc_setkey,
4297 			.setauthsize = chcr_authenc_setauthsize,
4298 		}
4299 	},
4300 	{
4301 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4302 		.is_registered = 0,
4303 		.alg.aead = {
4304 			.base = {
4305 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4306 				.cra_driver_name =
4307 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4308 				.cra_blocksize	 = 1,
4309 				.cra_priority = CHCR_AEAD_PRIORITY,
4310 				.cra_ctxsize =	sizeof(struct chcr_context) +
4311 						sizeof(struct chcr_aead_ctx) +
4312 						sizeof(struct chcr_authenc_ctx),
4313 			},
4314 			.ivsize = CTR_RFC3686_IV_SIZE,
4315 			.maxauthsize = SHA224_DIGEST_SIZE,
4316 			.setkey = chcr_authenc_setkey,
4317 			.setauthsize = chcr_authenc_setauthsize,
4318 		}
4319 	},
4320 	{
4321 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4322 		.is_registered = 0,
4323 		.alg.aead = {
4324 			.base = {
4325 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4326 				.cra_driver_name =
4327 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4328 				.cra_blocksize	 = 1,
4329 				.cra_priority = CHCR_AEAD_PRIORITY,
4330 				.cra_ctxsize =	sizeof(struct chcr_context) +
4331 						sizeof(struct chcr_aead_ctx) +
4332 						sizeof(struct chcr_authenc_ctx),
4333 
4334 			},
4335 			.ivsize = CTR_RFC3686_IV_SIZE,
4336 			.maxauthsize = SHA384_DIGEST_SIZE,
4337 			.setkey = chcr_authenc_setkey,
4338 			.setauthsize = chcr_authenc_setauthsize,
4339 		}
4340 	},
4341 	{
4342 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4343 		.is_registered = 0,
4344 		.alg.aead = {
4345 			.base = {
4346 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4347 				.cra_driver_name =
4348 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4349 				.cra_blocksize	 = 1,
4350 				.cra_priority = CHCR_AEAD_PRIORITY,
4351 				.cra_ctxsize =	sizeof(struct chcr_context) +
4352 						sizeof(struct chcr_aead_ctx) +
4353 						sizeof(struct chcr_authenc_ctx),
4354 
4355 			},
4356 			.ivsize = CTR_RFC3686_IV_SIZE,
4357 			.maxauthsize = SHA512_DIGEST_SIZE,
4358 			.setkey = chcr_authenc_setkey,
4359 			.setauthsize = chcr_authenc_setauthsize,
4360 		}
4361 	},
4362 	{
4363 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4364 		.is_registered = 0,
4365 		.alg.aead = {
4366 			.base = {
4367 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4368 				.cra_driver_name =
4369 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4370 				.cra_blocksize	 = 1,
4371 				.cra_priority = CHCR_AEAD_PRIORITY,
4372 				.cra_ctxsize =	sizeof(struct chcr_context) +
4373 						sizeof(struct chcr_aead_ctx) +
4374 						sizeof(struct chcr_authenc_ctx),
4375 
4376 			},
4377 			.ivsize  = CTR_RFC3686_IV_SIZE,
4378 			.maxauthsize = 0,
4379 			.setkey  = chcr_aead_digest_null_setkey,
4380 			.setauthsize = chcr_authenc_null_setauthsize,
4381 		}
4382 	},
4383 };
4384 
4385 /*
4386  *	chcr_unregister_alg - Deregister crypto algorithms from the
4387  *	kernel framework.
4388  */
4389 static int chcr_unregister_alg(void)
4390 {
4391 	int i;
4392 
4393 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4394 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4395 		case CRYPTO_ALG_TYPE_SKCIPHER:
4396 			if (driver_algs[i].is_registered && refcount_read(
4397 			    &driver_algs[i].alg.skcipher.base.cra_refcnt)
4398 			    == 1) {
4399 				crypto_unregister_skcipher(
4400 						&driver_algs[i].alg.skcipher);
4401 				driver_algs[i].is_registered = 0;
4402 			}
4403 			break;
4404 		case CRYPTO_ALG_TYPE_AEAD:
4405 			if (driver_algs[i].is_registered && refcount_read(
4406 			    &driver_algs[i].alg.aead.base.cra_refcnt) == 1) {
4407 				crypto_unregister_aead(
4408 						&driver_algs[i].alg.aead);
4409 				driver_algs[i].is_registered = 0;
4410 			}
4411 			break;
4412 		case CRYPTO_ALG_TYPE_AHASH:
4413 			if (driver_algs[i].is_registered && refcount_read(
4414 			    &driver_algs[i].alg.hash.halg.base.cra_refcnt)
4415 			    == 1) {
4416 				crypto_unregister_ahash(
4417 						&driver_algs[i].alg.hash);
4418 				driver_algs[i].is_registered = 0;
4419 			}
4420 			break;
4421 		}
4422 	}
4423 	return 0;
4424 }
4425 
4426 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4427 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4428 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4429 
4430 /*
4431  *	chcr_register_alg - Register crypto algorithms with kernel framework.
4432  */
4433 static int chcr_register_alg(void)
4434 {
4435 	struct crypto_alg ai;
4436 	struct ahash_alg *a_hash;
4437 	int err = 0, i;
4438 	char *name = NULL;
4439 
4440 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4441 		if (driver_algs[i].is_registered)
4442 			continue;
4443 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4444 		case CRYPTO_ALG_TYPE_SKCIPHER:
4445 			driver_algs[i].alg.skcipher.base.cra_priority =
4446 				CHCR_CRA_PRIORITY;
4447 			driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE;
4448 			driver_algs[i].alg.skcipher.base.cra_flags =
4449 				CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
4450 				CRYPTO_ALG_ALLOCATES_MEMORY |
4451 				CRYPTO_ALG_NEED_FALLBACK;
4452 			driver_algs[i].alg.skcipher.base.cra_ctxsize =
4453 				sizeof(struct chcr_context) +
4454 				sizeof(struct ablk_ctx);
4455 			driver_algs[i].alg.skcipher.base.cra_alignmask = 0;
4456 
4457 			err = crypto_register_skcipher(&driver_algs[i].alg.skcipher);
4458 			name = driver_algs[i].alg.skcipher.base.cra_driver_name;
4459 			break;
4460 		case CRYPTO_ALG_TYPE_AEAD:
4461 			driver_algs[i].alg.aead.base.cra_flags =
4462 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
4463 				CRYPTO_ALG_ALLOCATES_MEMORY;
4464 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4465 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4466 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4467 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4468 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4469 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4470 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4471 			break;
4472 		case CRYPTO_ALG_TYPE_AHASH:
4473 			a_hash = &driver_algs[i].alg.hash;
4474 			a_hash->update = chcr_ahash_update;
4475 			a_hash->final = chcr_ahash_final;
4476 			a_hash->finup = chcr_ahash_finup;
4477 			a_hash->digest = chcr_ahash_digest;
4478 			a_hash->export = chcr_ahash_export;
4479 			a_hash->import = chcr_ahash_import;
4480 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4481 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4482 			a_hash->halg.base.cra_module = THIS_MODULE;
4483 			a_hash->halg.base.cra_flags =
4484 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4485 			a_hash->halg.base.cra_alignmask = 0;
4486 			a_hash->halg.base.cra_exit = NULL;
4487 
4488 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4489 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4490 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4491 				a_hash->init = chcr_hmac_init;
4492 				a_hash->setkey = chcr_ahash_setkey;
4493 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4494 			} else {
4495 				a_hash->init = chcr_sha_init;
4496 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4497 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4498 			}
4499 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4500 			ai = driver_algs[i].alg.hash.halg.base;
4501 			name = ai.cra_driver_name;
4502 			break;
4503 		}
4504 		if (err) {
4505 			pr_err("%s : Algorithm registration failed\n", name);
4506 			goto register_err;
4507 		} else {
4508 			driver_algs[i].is_registered = 1;
4509 		}
4510 	}
4511 	return 0;
4512 
4513 register_err:
4514 	chcr_unregister_alg();
4515 	return err;
4516 }
4517 
4518 /*
4519  *	start_crypto - Register the crypto algorithms.
4520  *	This should be called once when the first device comes up. After this,
4521  *	the kernel will start calling driver APIs for crypto operations.
4522  */
4523 int start_crypto(void)
4524 {
4525 	return chcr_register_alg();
4526 }
4527 
4528 /*
4529  *	stop_crypto - Deregister all the crypto algorithms with kernel.
4530  *	This should be called once when the last device goes down. After this,
4531  *	the kernel will not call the driver APIs for crypto operations.
4532  */
4533 int stop_crypto(void)
4534 {
4535 	chcr_unregister_alg();
4536 	return 0;
4537 }
4538