/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya (manojmalviya@chelsio.com)
 *      Atul Gupta (atul.gupta@chelsio.com)
 *      Jitendra Lulla (jlulla@chelsio.com)
 *      Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *      Harsh Jain (harsh@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

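/*
 * Space (in bytes) consumed in a ULPTX SGL (sgl_ent_len) and in a PHYS
 * DSGL (dsgl_ent_len) by a given number of scatter/gather entries; used
 * below to decide how much of a request fits into one work request.
 */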
static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};

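/* AES key-schedule round constants (Rcon), stored in the high byte */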
static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return ctx->dev->u_ctx;
}

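/* A work request can carry its payload as immediate data only if the
 * whole skb fits within the maximum work-request length.
 */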
static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}

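/* Count the SG entries needed to cover @reqlen bytes of @sg, skipping
 * the first @skip bytes and splitting each DMA mapping into chunks of
 * at most @entlen bytes.
 */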
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                   authsize, req->assoclen +
                                   req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        req->base.complete(&req->base, err);
}

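/* Run the AES key schedule in software and emit the final round-key
 * words in reverse order; generate_copy_rrkey() below copies this
 * "reversed round key" into the key context used for decryption.
 */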
static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8 nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
                j--;
                if (j < 0)
                        j += nk;
        }
}

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

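/* Hash a single block of ipad/opad material and export the raw internal
 * state; the result seeds the hardware with a precomputed partial hash
 * for HMAC.
 */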
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);
        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                      size_t size,
                                      dma_addr_t *addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(*addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                            offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                       size_t size,
                                       dma_addr_t *addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(*addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

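/* Return the number of source bytes (in CHCR_SRC_SG_SIZE chunks) that
 * still fit in the remaining work-request @space, starting @srcskip
 * bytes into @src.
 */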
static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int minsg,
                               unsigned int space,
                               unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}

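/* Walk @src and @dst together and return the largest byte count whose
 * ULPTX SGL cost (source entries) plus PHYS DSGL cost (destination
 * entries) still fits in @space.
 */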
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        srcskip = 0;
                        soffset = 0;
                }
        }
        return min(srclen, dstlen);
}

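/* Hand the request to the software skcipher; used when the hardware
 * path cannot make progress (e.g. a zero-byte remainder after
 * splitting a request across work requests).
 */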
static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

        skcipher_request_set_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                        crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}

static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: work request parameters: the cipher request, the ingress qid
 *           on which the response should be received, and the number of
 *           bytes to process in this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                             (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                                  ablkctx->ciph_mode,
                                                                  0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                                   0, 1, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                                   sg_nents(wrparam->req->src),
                                   wrparam->req->info, 16,
                                   reqctx->processed + wrparam->bytes -
                                   AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        int err = 0;

        crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
                crypto_skcipher_get_flags(ablkctx->sw_cipher) &
                CRYPTO_TFM_RES_MASK;
        return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
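
/* Add @add to the 128-bit big-endian counter in @srciv, propagating
 * the carry, and store the result in @dstiv.
 */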
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}

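/* Clamp @bytes so the low 32-bit word of the CTR counter in @iv does
 * not wrap within one work request; any remainder is issued as a
 * follow-up request with the carried-over IV.
 */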
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        c = (u64)temp + 1; /* Number of blocks that can be processed
                            * without overflowing the counter.
                            */
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}

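/* Recompute the XTS tweak for the next chunk: encrypt the saved IV
 * with the second half of the key, then multiply by x in GF(2^128)
 * once per AES block already processed (x^8 steps for groups of 8).
 */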
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_cipher *cipher;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        cipher = ablkctx->aes_generic;
        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out;
        crypto_cipher_encrypt_one(cipher, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                crypto_cipher_decrypt_one(cipher, iv, iv);
out:
        return ret;
}

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) =
                        cpu_to_be32((reqctx->processed / AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending last WR */
                        memcpy(iv, req->info, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in RFC 3686 the
 * initial counter starts from 1 and the IV buffer, which is only 8 bytes,
 * stays constant across subsequent update requests.
 */

static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for Decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct cipher_wr_param wrparam;
        int bytes;

        if (err)
                goto unmap;
        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->nbytes - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           req->info,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        req->base.complete(&req->base, err);
        return err;
}

static int process_cipher(struct ablkcipher_request *req,
                          unsigned short qid,
                          struct sk_buff **skb,
                          unsigned short op_type)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;
        if (!req->info)
                goto error;
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }
        chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                             AES_MIN_KEY_SIZE +
                                             sizeof(struct cpl_rx_phys_dsgl) +
                                             /* Min dsgl size */
                                             32))) {
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->nbytes;
        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->nbytes;
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->info, bytes);
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                       CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
                memcpy(reqctx->iv, req->info, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           reqctx->iv,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct sk_buff *skb = NULL;
        int err, isfull = 0;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -ENOSPC;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_ENCRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        int err, isfull = 0;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -ENOSPC;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_DECRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_device_init(struct chcr_context *ctx)
{
        struct uld_ctx *u_ctx = NULL;
        struct adapter *adap;
        unsigned int id;
        int txq_perchan, txq_idx, ntxq;
        int err = 0, rxq_perchan, rxq_idx;

        id = smp_processor_id();
        if (!ctx->dev) {
                u_ctx = assign_chcr_device();
                if (!u_ctx) {
                        pr_err("chcr device assignment fails\n");
                        goto out;
                }
                ctx->dev = u_ctx->dev;
                adap = padap(ctx->dev);
                ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
                                    adap->vres.ncrypto_fc);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
                ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
                rxq_idx = ctx->tx_chan_id * rxq_perchan;
                rxq_idx += id % rxq_perchan;
                txq_idx = ctx->tx_chan_id * txq_perchan;
                txq_idx += id % txq_perchan;
                ctx->rx_qidx = rxq_idx;
                ctx->tx_qidx = txq_idx;
                /* Channel Id used by SGE to forward packet to Host.
                 * Same value should be used in cpl_fw6_pld RSS_CH field
                 * by FW. Driver programs PCI channel ID to be used in fw
                 * at the time of queue allocation with value "pi->tx_chan"
                 */
                ctx->pci_chan_id = txq_idx / txq_perchan;
        }
out:
        return err;
}

static int chcr_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }

        if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
                /* To update tweak */
                ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
                if (IS_ERR(ablkctx->aes_generic)) {
                        pr_err("failed to allocate aes cipher for tweak\n");
                        return PTR_ERR(ablkctx->aes_generic);
                }
        } else
                ablkctx->aes_generic = NULL;

        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        /* RFC 3686 initialises the IV counter value to 1, so
         * rfc3686(ctr(aes)) cannot be used as the fallback in
         * chcr_handle_cipher_resp.
         */
        ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }
        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_cra_exit(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        crypto_free_skcipher(ablkctx->sw_cipher);
        if (ablkctx->aes_generic)
                crypto_free_cipher(ablkctx->aes_generic);
}

static int get_alg_config(struct algo_param *params,
                          unsigned int auth_size)
{
        switch (auth_size) {
        case SHA1_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
                params->result_size = SHA1_DIGEST_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
                params->result_size = SHA256_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
                params->result_size = SHA256_DIGEST_SIZE;
                break;
        case SHA384_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
                params->result_size = SHA512_DIGEST_SIZE;
                break;
        case SHA512_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
                params->result_size = SHA512_DIGEST_SIZE;
                break;
        default:
                pr_err("chcr : ERROR, unsupported digest size\n");
                return -EINVAL;
        }
        return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
        crypto_free_shash(base_hash);
}

/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: parameters for this work request
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
                                      struct hash_wr_param *param)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
        struct chcr_wr *chcr_req;
        struct ulptx_sgl *ulptx;
        unsigned int nents = 0, transhdr_len;
        unsigned int temp = 0;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
        struct adapter *adap = padap(h_ctx(tfm)->dev);
        int error = 0;

        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
                                param->sg_len) <= SGE_MAX_WR_LEN;
        nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
                              CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
        nents += param->bfr_len ? 1 : 0;
        transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
                        param->sg_len, 16) : (sgl_len(nents) * 8);
        transhdr_len = roundup(transhdr_len, 16);

        skb = alloc_skb(transhdr_len, flags);
        if (!skb)
                return ERR_PTR(-ENOMEM);
        chcr_req = __skb_put_zero(skb, transhdr_len);

        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
        chcr_req->sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
                                         param->opad_needed, 0);

        chcr_req->sec_cpl.ivgen_hdrlen =
                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

        memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
               param->alg_prm.result_size);

        if (param->opad_needed)
                memcpy(chcr_req->key_ctx.key +
                       ((param->alg_prm.result_size <= 32) ? 32 :
                        CHCR_HASH_MAX_DIGEST_SIZE),
                       hmacctx->opad, param->alg_prm.result_size);

        chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
                                                     param->alg_prm.mk_size, 0,
                                                     param->opad_needed,
                                                     ((param->kctx_len +
                                                       sizeof(chcr_req->key_ctx)) >> 4));
        chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
        ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
                                     DUMMY_BYTES);
        if (param->bfr_len != 0) {
                req_ctx->hctx_wr.dma_addr =
                        dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
                                       param->bfr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
                                      req_ctx->hctx_wr.dma_addr)) {
                        error = -ENOMEM;
                        goto err;
                }
                req_ctx->hctx_wr.dma_len = param->bfr_len;
        } else {
                req_ctx->hctx_wr.dma_addr = 0;
        }
        chcr_add_hash_src_ent(req, ulptx, param);
        /* Request up to the maximum WR size */
        temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
                                (param->sg_len + param->bfr_len) : 0);
        atomic_inc(&adap->chcr_stats.digest_rqst);
        create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
                    param->hash_size, transhdr_len,
                    temp, 0);
        req_ctx->hctx_wr.skb = skb;
        return skb;
err:
        kfree_skb(skb);
        return ERR_PTR(error);
}

static int chcr_ahash_update(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        u8 remainder = 0, bs;
        unsigned int nbytes = req->nbytes;
        struct hash_wr_param params;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -ENOSPC;
        }

        if (nbytes + req_ctx->reqlen >= bs) {
                remainder = (nbytes + req_ctx->reqlen) % bs;
                nbytes = nbytes + req_ctx->reqlen - remainder;
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
                                   + req_ctx->reqlen, nbytes, 0);
                req_ctx->reqlen += nbytes;
                return 0;
        }
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error)
                return -ENOMEM;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len > req->nbytes)
                params.sg_len = req->nbytes;
        params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
                        req_ctx->reqlen;
        params.opad_needed = 0;
        params.more = 1;
        params.last = 0;
        params.bfr_len = req_ctx->reqlen;
        params.scmd1 = 0;
        req_ctx->hctx_wr.srcsg = req->src;

        params.hash_size = params.alg_prm.result_size;
        req_ctx->data_len += params.sg_len + params.bfr_len;
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }

        req_ctx->hctx_wr.processed += params.sg_len;
        if (remainder) {
                /* Swap buffers */
                swap(req_ctx->reqbfr, req_ctx->skbfr);
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   req_ctx->reqbfr, remainder, req->nbytes -
                                   remainder);
        }
        req_ctx->reqlen = remainder;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);

        return isfull ? -EBUSY : -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
        return error;
}

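/* Build the final MD-style padding block by hand: a 0x80 byte followed
 * by zeroes, with the total message length in bits in the last 8 bytes
 * of the block.
 */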
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
        memset(bfr_ptr, 0, bs);
        *bfr_ptr = 0x80;
        if (bs == 64)
                *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
        else
                *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}

static int chcr_ahash_final(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct hash_wr_param params;
        struct sk_buff *skb;
        struct uld_ctx *u_ctx = NULL;
        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        chcr_init_hctx_per_wr(req_ctx);
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (is_hmac(crypto_ahash_tfm(rtfm)))
                params.opad_needed = 1;
        else
                params.opad_needed = 0;
        params.sg_len = 0;
        req_ctx->hctx_wr.isfinal = 1;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.opad_needed = 1;
                params.kctx_len *= 2;
        } else {
                params.opad_needed = 0;
        }

        req_ctx->hctx_wr.result = 1;
        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.srcsg = req->src;
        if (req_ctx->reqlen == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.bfr_len = bs;
        } else {
                params.scmd1 = req_ctx->data_len;
                params.last = 1;
                params.more = 0;
        }
        params.hash_size = crypto_ahash_digestsize(rtfm);
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
        req_ctx->reqlen = 0;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
}
1697
chcr_ahash_finup(struct ahash_request * req)1698 static int chcr_ahash_finup(struct ahash_request *req)
1699 {
1700 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1701 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1702 struct uld_ctx *u_ctx = NULL;
1703 struct sk_buff *skb;
1704 struct hash_wr_param params;
1705 u8 bs;
1706 int error, isfull = 0;
1707
1708 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1709 u_ctx = ULD_CTX(h_ctx(rtfm));
1710
1711 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1712 h_ctx(rtfm)->tx_qidx))) {
1713 isfull = 1;
1714 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1715 return -ENOSPC;
1716 }
1717 chcr_init_hctx_per_wr(req_ctx);
1718 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1719 if (error)
1720 return -ENOMEM;
1721
1722 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1723 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1724 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1725 params.kctx_len *= 2;
1726 params.opad_needed = 1;
1727 } else {
1728 params.opad_needed = 0;
1729 }
1730
1731 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1732 HASH_SPACE_LEFT(params.kctx_len), 0);
1733 if (params.sg_len < req->nbytes) {
1734 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1735 params.kctx_len /= 2;
1736 params.opad_needed = 0;
1737 }
1738 params.last = 0;
1739 params.more = 1;
1740 params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1741 - req_ctx->reqlen;
1742 params.hash_size = params.alg_prm.result_size;
1743 params.scmd1 = 0;
1744 } else {
1745 params.last = 1;
1746 params.more = 0;
1747 params.sg_len = req->nbytes;
1748 params.hash_size = crypto_ahash_digestsize(rtfm);
1749 params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1750 params.sg_len;
1751 }
1752 params.bfr_len = req_ctx->reqlen;
1753 req_ctx->data_len += params.bfr_len + params.sg_len;
1754 req_ctx->hctx_wr.result = 1;
1755 req_ctx->hctx_wr.srcsg = req->src;
1756 if ((req_ctx->reqlen + req->nbytes) == 0) {
1757 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1758 params.last = 0;
1759 params.more = 1;
1760 params.scmd1 = 0;
1761 params.bfr_len = bs;
1762 }
1763 skb = create_hash_wr(req, &params);
1764 if (IS_ERR(skb)) {
1765 error = PTR_ERR(skb);
1766 goto unmap;
1767 }
1768 req_ctx->reqlen = 0;
1769 req_ctx->hctx_wr.processed += params.sg_len;
1770 skb->dev = u_ctx->lldi.ports[0];
1771 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1772 chcr_send_wr(skb);
1773
1774 return isfull ? -EBUSY : -EINPROGRESS;
1775 unmap:
1776 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1777 return error;
1778 }
1779
1780 static int chcr_ahash_digest(struct ahash_request *req)
1781 {
1782 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1783 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1784 struct uld_ctx *u_ctx = NULL;
1785 struct sk_buff *skb;
1786 struct hash_wr_param params;
1787 u8 bs;
1788 int error, isfull = 0;
1789
1790 rtfm->init(req);
1791 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1792
1793 u_ctx = ULD_CTX(h_ctx(rtfm));
1794 if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1795 h_ctx(rtfm)->tx_qidx))) {
1796 isfull = 1;
1797 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1798 return -ENOSPC;
1799 }
1800
1801 chcr_init_hctx_per_wr(req_ctx);
1802 error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1803 if (error)
1804 return -ENOMEM;
1805
1806 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1807 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1808 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1809 params.kctx_len *= 2;
1810 params.opad_needed = 1;
1811 } else {
1812 params.opad_needed = 0;
1813 }
1814 params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1815 HASH_SPACE_LEFT(params.kctx_len), 0);
1816 if (params.sg_len < req->nbytes) {
1817 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1818 params.kctx_len /= 2;
1819 params.opad_needed = 0;
1820 }
1821 params.last = 0;
1822 params.more = 1;
1823 params.scmd1 = 0;
1824 params.sg_len = rounddown(params.sg_len, bs);
1825 params.hash_size = params.alg_prm.result_size;
1826 } else {
1827 params.sg_len = req->nbytes;
1828 params.hash_size = crypto_ahash_digestsize(rtfm);
1829 params.last = 1;
1830 params.more = 0;
1831 params.scmd1 = req->nbytes + req_ctx->data_len;
1832
1833 }
1834 params.bfr_len = 0;
1835 req_ctx->hctx_wr.result = 1;
1836 req_ctx->hctx_wr.srcsg = req->src;
1837 req_ctx->data_len += params.bfr_len + params.sg_len;
1838
1839 if (req->nbytes == 0) {
1840 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1841 params.more = 1;
1842 params.bfr_len = bs;
1843 }
1844
1845 skb = create_hash_wr(req, &params);
1846 if (IS_ERR(skb)) {
1847 error = PTR_ERR(skb);
1848 goto unmap;
1849 }
1850 req_ctx->hctx_wr.processed += params.sg_len;
1851 skb->dev = u_ctx->lldi.ports[0];
1852 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1853 chcr_send_wr(skb);
1854 return isfull ? -EBUSY : -EINPROGRESS;
1855 unmap:
1856 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1857 return error;
1858 }
1859
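/*
 * Issue the next work request of a multi-WR hash. Invoked from the
 * completion path when the previous WR could not cover all of req->src;
 * resumes the scatterlist walk at hctx_wr->srcsg/src_ofst and emits
 * either another intermediate WR (more = 1) or the final one carrying
 * the digest request.
 */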
1860 static int chcr_ahash_continue(struct ahash_request *req)
1861 {
1862 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1863 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1864 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1865 struct uld_ctx *u_ctx = NULL;
1866 struct sk_buff *skb;
1867 struct hash_wr_param params;
1868 u8 bs;
1869 int error;
1870
1871 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1872 u_ctx = ULD_CTX(h_ctx(rtfm));
1873 get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1874 params.kctx_len = roundup(params.alg_prm.result_size, 16);
1875 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1876 params.kctx_len *= 2;
1877 params.opad_needed = 1;
1878 } else {
1879 params.opad_needed = 0;
1880 }
1881 params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1882 HASH_SPACE_LEFT(params.kctx_len),
1883 hctx_wr->src_ofst);
1884 if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1885 params.sg_len = req->nbytes - hctx_wr->processed;
1886 if (!hctx_wr->result ||
1887 ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1888 if (is_hmac(crypto_ahash_tfm(rtfm))) {
1889 params.kctx_len /= 2;
1890 params.opad_needed = 0;
1891 }
1892 params.last = 0;
1893 params.more = 1;
1894 params.sg_len = rounddown(params.sg_len, bs);
1895 params.hash_size = params.alg_prm.result_size;
1896 params.scmd1 = 0;
1897 } else {
1898 params.last = 1;
1899 params.more = 0;
1900 params.hash_size = crypto_ahash_digestsize(rtfm);
1901 params.scmd1 = reqctx->data_len + params.sg_len;
1902 }
1903 params.bfr_len = 0;
1904 reqctx->data_len += params.sg_len;
1905 skb = create_hash_wr(req, &params);
1906 if (IS_ERR(skb)) {
1907 error = PTR_ERR(skb);
1908 goto err;
1909 }
1910 hctx_wr->processed += params.sg_len;
1911 skb->dev = u_ctx->lldi.ports[0];
1912 set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1913 chcr_send_wr(skb);
1914 return 0;
1915 err:
1916 return error;
1917 }
1918
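/*
 * Completion handler for hash WRs. Copies the digest to req->result when
 * this was the final WR; otherwise saves the partial hash (widened to
 * the internal state size for SHA-224/SHA-384) and kicks off the next WR
 * via chcr_ahash_continue(). The request is completed on error or once
 * the last WR has been processed.
 */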
1919 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1920 unsigned char *input,
1921 int err)
1922 {
1923 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1924 struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1925 int digestsize, updated_digestsize;
1926 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1927 struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1928
1929 if (input == NULL)
1930 goto out;
1931 digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
1932 updated_digestsize = digestsize;
1933 if (digestsize == SHA224_DIGEST_SIZE)
1934 updated_digestsize = SHA256_DIGEST_SIZE;
1935 else if (digestsize == SHA384_DIGEST_SIZE)
1936 updated_digestsize = SHA512_DIGEST_SIZE;
1937
1938 if (hctx_wr->dma_addr) {
1939 dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
1940 hctx_wr->dma_len, DMA_TO_DEVICE);
1941 hctx_wr->dma_addr = 0;
1942 }
1943 if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
1944 req->nbytes)) {
1945 if (hctx_wr->result == 1) {
1946 hctx_wr->result = 0;
1947 memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
1948 digestsize);
1949 } else {
1950 memcpy(reqctx->partial_hash,
1951 input + sizeof(struct cpl_fw6_pld),
1952 updated_digestsize);
1953
1954 }
1955 goto unmap;
1956 }
1957 memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
1958 updated_digestsize);
1959
1960 err = chcr_ahash_continue(req);
1961 if (err)
1962 goto unmap;
1963 return;
1964 unmap:
1965 if (hctx_wr->is_sg_map)
1966 chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1967
1968
1969 out:
1970 req->base.complete(&req->base, err);
1971 }
1972
1973 /*
1974 * chcr_handle_resp - Unmap the DMA buffers associated with the request
1975 * @req: crypto request
1976 */
1977 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
1978 int err)
1979 {
1980 struct crypto_tfm *tfm = req->tfm;
1981 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1982 struct adapter *adap = padap(ctx->dev);
1983
1984 switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
1985 case CRYPTO_ALG_TYPE_AEAD:
1986 chcr_handle_aead_resp(aead_request_cast(req), input, err);
1987 break;
1988
1989 case CRYPTO_ALG_TYPE_ABLKCIPHER:
1990 err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
1991 input, err);
1992 break;
1993
1994 case CRYPTO_ALG_TYPE_AHASH:
1995 chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
1996 }
1997 atomic_inc(&adap->chcr_stats.complete);
1998 return err;
1999 }
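
/*
 * export/import save and restore the software-visible hash state
 * (partial hash, buffered bytes and running length) so a request can be
 * suspended and resumed, as the ahash API requires. The per-WR
 * bookkeeping is deliberately reinitialised rather than copied.
 */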
2000 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2001 {
2002 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2003 struct chcr_ahash_req_ctx *state = out;
2004
2005 state->reqlen = req_ctx->reqlen;
2006 state->data_len = req_ctx->data_len;
2007 memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2008 memcpy(state->partial_hash, req_ctx->partial_hash,
2009 CHCR_HASH_MAX_DIGEST_SIZE);
2010 chcr_init_hctx_per_wr(state);
2011 return 0;
2012 }
2013
2014 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2015 {
2016 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2017 struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2018
2019 req_ctx->reqlen = state->reqlen;
2020 req_ctx->data_len = state->data_len;
2021 req_ctx->reqbfr = req_ctx->bfr1;
2022 req_ctx->skbfr = req_ctx->bfr2;
2023 memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2024 memcpy(req_ctx->partial_hash, state->partial_hash,
2025 CHCR_HASH_MAX_DIGEST_SIZE);
2026 chcr_init_hctx_per_wr(req_ctx);
2027 return 0;
2028 }
2029
2030 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2031 unsigned int keylen)
2032 {
2033 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2034 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2035 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2036 unsigned int i, err = 0, updated_digestsize;
2037
2038 SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2039
2040 /* Use the key to calculate the ipad and opad. The ipad will be sent
2041 * with the first request's data, the opad with the final hash result;
2042 * they are kept in hmacctx->ipad and hmacctx->opad respectively.
2043 */
2044 shash->tfm = hmacctx->base_hash;
2045 shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
2046 if (keylen > bs) {
2047 err = crypto_shash_digest(shash, key, keylen,
2048 hmacctx->ipad);
2049 if (err)
2050 goto out;
2051 keylen = digestsize;
2052 } else {
2053 memcpy(hmacctx->ipad, key, keylen);
2054 }
2055 memset(hmacctx->ipad + keylen, 0, bs - keylen);
2056 memcpy(hmacctx->opad, hmacctx->ipad, bs);
2057
2058 for (i = 0; i < bs / sizeof(int); i++) {
2059 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2060 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2061 }
2062
2063 updated_digestsize = digestsize;
2064 if (digestsize == SHA224_DIGEST_SIZE)
2065 updated_digestsize = SHA256_DIGEST_SIZE;
2066 else if (digestsize == SHA384_DIGEST_SIZE)
2067 updated_digestsize = SHA512_DIGEST_SIZE;
2068 err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2069 hmacctx->ipad, digestsize);
2070 if (err)
2071 goto out;
2072 chcr_change_order(hmacctx->ipad, updated_digestsize);
2073
2074 err = chcr_compute_partial_hash(shash, hmacctx->opad,
2075 hmacctx->opad, digestsize);
2076 if (err)
2077 goto out;
2078 chcr_change_order(hmacctx->opad, updated_digestsize);
2079 out:
2080 return err;
2081 }
2082
2083 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2084 unsigned int key_len)
2085 {
2086 struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2087 unsigned short context_size = 0;
2088 int err;
2089
2090 err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2091 if (err)
2092 goto badkey_err;
2093
2094 memcpy(ablkctx->key, key, key_len);
2095 ablkctx->enckey_len = key_len;
2096 get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2097 context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2098 ablkctx->key_ctx_hdr =
2099 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2100 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2101 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2102 CHCR_KEYCTX_NO_KEY, 1,
2103 0, context_size);
2104 ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2105 return 0;
2106 badkey_err:
2107 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2108 ablkctx->enckey_len = 0;
2109
2110 return err;
2111 }
2112
2113 static int chcr_sha_init(struct ahash_request *areq)
2114 {
2115 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2116 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2117 int digestsize = crypto_ahash_digestsize(tfm);
2118
2119 req_ctx->data_len = 0;
2120 req_ctx->reqlen = 0;
2121 req_ctx->reqbfr = req_ctx->bfr1;
2122 req_ctx->skbfr = req_ctx->bfr2;
2123 copy_hash_init_values(req_ctx->partial_hash, digestsize);
2124
2125 return 0;
2126 }
2127
2128 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2129 {
2130 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2131 sizeof(struct chcr_ahash_req_ctx));
2132 return chcr_device_init(crypto_tfm_ctx(tfm));
2133 }
2134
2135 static int chcr_hmac_init(struct ahash_request *areq)
2136 {
2137 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2138 struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2139 struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2140 unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2141 unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2142
2143 chcr_sha_init(areq);
2144 req_ctx->data_len = bs;
2145 if (is_hmac(crypto_ahash_tfm(rtfm))) {
2146 if (digestsize == SHA224_DIGEST_SIZE)
2147 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2148 SHA256_DIGEST_SIZE);
2149 else if (digestsize == SHA384_DIGEST_SIZE)
2150 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2151 SHA512_DIGEST_SIZE);
2152 else
2153 memcpy(req_ctx->partial_hash, hmacctx->ipad,
2154 digestsize);
2155 }
2156 return 0;
2157 }
2158
2159 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2160 {
2161 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2162 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2163 unsigned int digestsize =
2164 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2165
2166 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2167 sizeof(struct chcr_ahash_req_ctx));
2168 hmacctx->base_hash = chcr_alloc_shash(digestsize);
2169 if (IS_ERR(hmacctx->base_hash))
2170 return PTR_ERR(hmacctx->base_hash);
2171 return chcr_device_init(crypto_tfm_ctx(tfm));
2172 }
2173
2174 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2175 {
2176 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2177 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2178
2179 if (hmacctx->base_hash) {
2180 chcr_free_shash(hmacctx->base_hash);
2181 hmacctx->base_hash = NULL;
2182 }
2183 }
2184
2185 inline void chcr_aead_common_exit(struct aead_request *req)
2186 {
2187 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2188 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2189 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2190
2191 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2192 }
2193
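/*
 * Common AEAD request setup: reject requests with no key or, on decrypt,
 * with less payload than the ICV; point scratch_pad at the area
 * following the IV when a CCM B0 block is needed; DMA-map the request
 * and precompute the source nent counts for the AAD and payload regions.
 */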
2194 static int chcr_aead_common_init(struct aead_request *req)
2195 {
2196 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2197 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2198 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2199 unsigned int authsize = crypto_aead_authsize(tfm);
2200 int error = -EINVAL;
2201
2202 /* validate key and request lengths */
2203 if (aeadctx->enckey_len == 0)
2204 goto err;
2205 if (reqctx->op && req->cryptlen < authsize)
2206 goto err;
2207 if (reqctx->b0_len)
2208 reqctx->scratch_pad = reqctx->iv + IV;
2209 else
2210 reqctx->scratch_pad = NULL;
2211
2212 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2213 reqctx->op);
2214 if (error) {
2215 error = -ENOMEM;
2216 goto err;
2217 }
2218 reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2219 CHCR_SRC_SG_SIZE, 0);
2220 reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2221 CHCR_SRC_SG_SIZE, req->assoclen);
2222 return 0;
2223 err:
2224 return error;
2225 }
2226
2227 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2228 int aadmax, int wrlen,
2229 unsigned short op_type)
2230 {
2231 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2232
2233 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2234 dst_nents > MAX_DSGL_ENT ||
2235 (req->assoclen > aadmax) ||
2236 (wrlen > SGE_MAX_WR_LEN))
2237 return 1;
2238 return 0;
2239 }
2240
2241 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2242 {
2243 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2244 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2245 struct aead_request *subreq = aead_request_ctx(req);
2246
2247 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2248 aead_request_set_callback(subreq, req->base.flags,
2249 req->base.complete, req->base.data);
2250 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2251 req->iv);
2252 aead_request_set_ad(subreq, req->assoclen);
2253 return op_type ? crypto_aead_decrypt(subreq) :
2254 crypto_aead_encrypt(subreq);
2255 }
2256
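/*
 * Build a work request for the authenc (cipher + hash) AEAD modes. The
 * input is laid out as AAD | IV | payload, with the IV authenticated.
 * Small requests are copied inline into the WR (reqctx->imm); larger
 * ones are referenced through a ULPTX SGL. Requests the hardware cannot
 * handle are redirected to the software fallback cipher.
 */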
2257 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2258 unsigned short qid,
2259 int size)
2260 {
2261 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2262 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2263 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2264 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2265 struct sk_buff *skb = NULL;
2266 struct chcr_wr *chcr_req;
2267 struct cpl_rx_phys_dsgl *phys_cpl;
2268 struct ulptx_sgl *ulptx;
2269 unsigned int transhdr_len;
2270 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2271 unsigned int kctx_len = 0, dnents;
2272 unsigned int assoclen = req->assoclen;
2273 unsigned int authsize = crypto_aead_authsize(tfm);
2274 int error = -EINVAL;
2275 int null = 0;
2276 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2277 GFP_ATOMIC;
2278 struct adapter *adap = padap(a_ctx(tfm)->dev);
2279
2280 if (req->cryptlen == 0)
2281 return NULL;
2282
2283 reqctx->b0_len = 0;
2284 error = chcr_aead_common_init(req);
2285 if (error)
2286 return ERR_PTR(error);
2287
2288 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2289 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2290 null = 1;
2291 assoclen = 0;
2292 reqctx->aad_nents = 0;
2293 }
2294 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2295 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2296 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
2297 req->assoclen);
2298 dnents += MIN_AUTH_SG; // For IV
2299
2300 dst_size = get_space_for_phys_dsgl(dnents);
2301 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2302 - sizeof(chcr_req->key_ctx);
2303 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2304 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2305 SGE_MAX_WR_LEN;
2306 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
2307 : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2308 + MIN_GCM_SG) * 8);
2309 transhdr_len += temp;
2310 transhdr_len = roundup(transhdr_len, 16);
2311
2312 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2313 transhdr_len, reqctx->op)) {
2314 atomic_inc(&adap->chcr_stats.fallback);
2315 chcr_aead_common_exit(req);
2316 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2317 }
2318 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2319 if (!skb) {
2320 error = -ENOMEM;
2321 goto err;
2322 }
2323
2324 chcr_req = __skb_put_zero(skb, transhdr_len);
2325
2326 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2327
2328 /*
2329 * Input order is AAD, IV and Payload, where the IV is included as
2330 * part of the authenticated data. All other fields are filled
2331 * according to the hardware spec.
2332 */
2333 chcr_req->sec_cpl.op_ivinsrtofst =
2334 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2335 assoclen + 1);
2336 chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2337 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2338 assoclen ? 1 : 0, assoclen,
2339 assoclen + IV + 1,
2340 (temp & 0x1F0) >> 4);
2341 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2342 temp & 0xF,
2343 null ? 0 : assoclen + IV + 1,
2344 temp, temp);
2345 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2346 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2347 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2348 else
2349 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2350 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2351 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2352 temp,
2353 actx->auth_mode, aeadctx->hmac_ctrl,
2354 IV >> 1);
2355 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2356 0, 0, dst_size);
2357
2358 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2359 if (reqctx->op == CHCR_ENCRYPT_OP ||
2360 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2361 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2362 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2363 aeadctx->enckey_len);
2364 else
2365 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2366 aeadctx->enckey_len);
2367
2368 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2369 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2370 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2371 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2372 memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2373 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2374 CTR_RFC3686_IV_SIZE);
2375 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2376 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2377 } else {
2378 memcpy(reqctx->iv, req->iv, IV);
2379 }
2380 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2381 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2382 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2383 chcr_add_aead_src_ent(req, ulptx, assoclen);
2384 atomic_inc(&adap->chcr_stats.cipher_rqst);
2385 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2386 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2387 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2388 transhdr_len, temp, 0);
2389 reqctx->skb = skb;
2390
2391 return skb;
2392 err:
2393 chcr_aead_common_exit(req);
2394
2395 return ERR_PTR(error);
2396 }
2397
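/*
 * DMA-map an AEAD request: the IV (plus the CCM B0 block, when present)
 * is mapped as a single bidirectional buffer, while the src/dst
 * scatterlists are mapped bidirectionally for in-place operation or
 * TO_DEVICE/FROM_DEVICE otherwise. Note that dma_map_sg() returns 0 on
 * failure, hence the "if (!error)" checks below.
 */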
2398 int chcr_aead_dma_map(struct device *dev,
2399 struct aead_request *req,
2400 unsigned short op_type)
2401 {
2402 int error;
2403 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2404 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2405 unsigned int authsize = crypto_aead_authsize(tfm);
2406 int dst_size;
2407
2408 dst_size = req->assoclen + req->cryptlen + (op_type ?
2409 -authsize : authsize);
2410 if (!req->cryptlen || !dst_size)
2411 return 0;
2412 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2413 DMA_BIDIRECTIONAL);
2414 if (dma_mapping_error(dev, reqctx->iv_dma))
2415 return -ENOMEM;
2416 if (reqctx->b0_len)
2417 reqctx->b0_dma = reqctx->iv_dma + IV;
2418 else
2419 reqctx->b0_dma = 0;
2420 if (req->src == req->dst) {
2421 error = dma_map_sg(dev, req->src,
2422 sg_nents_for_len(req->src, dst_size),
2423 DMA_BIDIRECTIONAL);
2424 if (!error)
2425 goto err;
2426 } else {
2427 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2428 DMA_TO_DEVICE);
2429 if (!error)
2430 goto err;
2431 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2432 DMA_FROM_DEVICE);
2433 if (!error) {
2434 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2435 DMA_TO_DEVICE);
2436 goto err;
2437 }
2438 }
2439
2440 return 0;
2441 err:
2442 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2443 return -ENOMEM;
2444 }
2445
2446 void chcr_aead_dma_unmap(struct device *dev,
2447 struct aead_request *req,
2448 unsigned short op_type)
2449 {
2450 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2451 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2452 unsigned int authsize = crypto_aead_authsize(tfm);
2453 int dst_size;
2454
2455 dst_size = req->assoclen + req->cryptlen + (op_type ?
2456 -authsize : authsize);
2457 if (!req->cryptlen || !dst_size)
2458 return;
2459
2460 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2461 DMA_BIDIRECTIONAL);
2462 if (req->src == req->dst) {
2463 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2464 DMA_BIDIRECTIONAL);
2465 } else {
2466 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2467 DMA_TO_DEVICE);
2468 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2469 DMA_FROM_DEVICE);
2470 }
2471 }
2472
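/*
 * Emit the AEAD source in WR order (optional B0, AAD, IV, payload). In
 * immediate mode the data is copied straight into the WR buffer;
 * otherwise ULPTX SGL entries pointing at the DMA-mapped pages are
 * generated instead.
 */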
2473 void chcr_add_aead_src_ent(struct aead_request *req,
2474 struct ulptx_sgl *ulptx,
2475 unsigned int assoclen)
2476 {
2477 struct ulptx_walk ulp_walk;
2478 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2479
2480 if (reqctx->imm) {
2481 u8 *buf = (u8 *)ulptx;
2482
2483 if (reqctx->b0_len) {
2484 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2485 buf += reqctx->b0_len;
2486 }
2487 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2488 buf, assoclen, 0);
2489 buf += assoclen;
2490 memcpy(buf, reqctx->iv, IV);
2491 buf += IV;
2492 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2493 buf, req->cryptlen, req->assoclen);
2494 } else {
2495 ulptx_walk_init(&ulp_walk, ulptx);
2496 if (reqctx->b0_len)
2497 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2498 &reqctx->b0_dma);
2499 ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2500 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2501 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2502 req->assoclen);
2503 ulptx_walk_end(&ulp_walk);
2504 }
2505 }
2506
2507 void chcr_add_aead_dst_ent(struct aead_request *req,
2508 struct cpl_rx_phys_dsgl *phys_cpl,
2509 unsigned int assoclen,
2510 unsigned short qid)
2511 {
2512 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2513 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2514 struct dsgl_walk dsgl_walk;
2515 unsigned int authsize = crypto_aead_authsize(tfm);
2516 struct chcr_context *ctx = a_ctx(tfm);
2517 u32 temp;
2518
2519 dsgl_walk_init(&dsgl_walk, phys_cpl);
2520 if (reqctx->b0_len)
2521 dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2522 dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2523 dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2524 temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
2525 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2526 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2527 }
2528
2529 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2530 void *ulptx,
2531 struct cipher_wr_param *wrparam)
2532 {
2533 struct ulptx_walk ulp_walk;
2534 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2535 u8 *buf = ulptx;
2536
2537 memcpy(buf, reqctx->iv, IV);
2538 buf += IV;
2539 if (reqctx->imm) {
2540 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2541 buf, wrparam->bytes, reqctx->processed);
2542 } else {
2543 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2544 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2545 reqctx->src_ofst);
2546 reqctx->srcsg = ulp_walk.last_sg;
2547 reqctx->src_ofst = ulp_walk.last_sg_len;
2548 ulptx_walk_end(&ulp_walk);
2549 }
2550 }
2551
2552 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2553 struct cpl_rx_phys_dsgl *phys_cpl,
2554 struct cipher_wr_param *wrparam,
2555 unsigned short qid)
2556 {
2557 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2558 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2559 struct chcr_context *ctx = c_ctx(tfm);
2560 struct dsgl_walk dsgl_walk;
2561
2562 dsgl_walk_init(&dsgl_walk, phys_cpl);
2563 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2564 reqctx->dst_ofst);
2565 reqctx->dstsg = dsgl_walk.last_sg;
2566 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2567
2568 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2569 }
2570
2571 void chcr_add_hash_src_ent(struct ahash_request *req,
2572 struct ulptx_sgl *ulptx,
2573 struct hash_wr_param *param)
2574 {
2575 struct ulptx_walk ulp_walk;
2576 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2577
2578 if (reqctx->hctx_wr.imm) {
2579 u8 *buf = (u8 *)ulptx;
2580
2581 if (param->bfr_len) {
2582 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2583 buf += param->bfr_len;
2584 }
2585
2586 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2587 sg_nents(reqctx->hctx_wr.srcsg), buf,
2588 param->sg_len, 0);
2589 } else {
2590 ulptx_walk_init(&ulp_walk, ulptx);
2591 if (param->bfr_len)
2592 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2593 &reqctx->hctx_wr.dma_addr);
2594 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2595 param->sg_len, reqctx->hctx_wr.src_ofst);
2596 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2597 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2598 ulptx_walk_end(&ulp_walk);
2599 }
2600 }
2601
2602 int chcr_hash_dma_map(struct device *dev,
2603 struct ahash_request *req)
2604 {
2605 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2606 int error = 0;
2607
2608 if (!req->nbytes)
2609 return 0;
2610 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2611 DMA_TO_DEVICE);
2612 if (!error)
2613 return -ENOMEM;
2614 req_ctx->hctx_wr.is_sg_map = 1;
2615 return 0;
2616 }
2617
2618 void chcr_hash_dma_unmap(struct device *dev,
2619 struct ahash_request *req)
2620 {
2621 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2622
2623 if (!req->nbytes)
2624 return;
2625
2626 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2627 DMA_TO_DEVICE);
2628 req_ctx->hctx_wr.is_sg_map = 0;
2629
2630 }
2631
2632 int chcr_cipher_dma_map(struct device *dev,
2633 struct ablkcipher_request *req)
2634 {
2635 int error;
2636
2637 if (req->src == req->dst) {
2638 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2639 DMA_BIDIRECTIONAL);
2640 if (!error)
2641 goto err;
2642 } else {
2643 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2644 DMA_TO_DEVICE);
2645 if (!error)
2646 goto err;
2647 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2648 DMA_FROM_DEVICE);
2649 if (!error) {
2650 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2651 DMA_TO_DEVICE);
2652 goto err;
2653 }
2654 }
2655
2656 return 0;
2657 err:
2658 return -ENOMEM;
2659 }
2660
2661 void chcr_cipher_dma_unmap(struct device *dev,
2662 struct ablkcipher_request *req)
2663 {
2664 if (req->src == req->dst) {
2665 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2666 DMA_BIDIRECTIONAL);
2667 } else {
2668 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2669 DMA_TO_DEVICE);
2670 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2671 DMA_FROM_DEVICE);
2672 }
2673 }
2674
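/*
 * Encode the CCM message length into the last csize bytes of the length
 * field, big-endian, as per RFC 3610; fields wider than four bytes keep
 * their leading zero bytes from the memset. For example, msglen = 0x4321
 * with csize = 3 produces { 0x00, 0x43, 0x21 }, while msglen = 0x200
 * with csize = 1 fails with -EOVERFLOW.
 */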
2675 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2676 {
2677 __be32 data;
2678
2679 memset(block, 0, csize);
2680 block += csize;
2681
2682 if (csize >= 4)
2683 csize = 4;
2684 else if (msglen > (unsigned int)(1 << (8 * csize)))
2685 return -EOVERFLOW;
2686
2687 data = cpu_to_be32(msglen);
2688 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2689
2690 return 0;
2691 }
2692
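/*
 * Construct the CCM B0 block (RFC 3610) in the scratch pad. B0 starts as
 * a copy of the IV, so its flags byte already carries L' = L - 1 in bits
 * 0-2; bits 3-5 are then set to (M - 2) / 2 for an M-byte ICV, and bit 6
 * (Adata) is set when AAD is present. The last L bytes receive the
 * plaintext length (cryptlen less the ICV on decrypt).
 */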
2693 static void generate_b0(struct aead_request *req,
2694 struct chcr_aead_ctx *aeadctx,
2695 unsigned short op_type)
2696 {
2697 unsigned int l, lp, m;
2698 int rc;
2699 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2700 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2701 u8 *b0 = reqctx->scratch_pad;
2702
2703 m = crypto_aead_authsize(aead);
2704
2705 memcpy(b0, reqctx->iv, 16);
2706
2707 lp = b0[0];
2708 l = lp + 1;
2709
2710 /* set m, bits 3-5 */
2711 *b0 |= (8 * ((m - 2) / 2));
2712
2713 /* set adata, bit 6, if associated data is used */
2714 if (req->assoclen)
2715 *b0 |= 64;
2716 rc = set_msg_len(b0 + 16 - l,
2717 (op_type == CHCR_DECRYPT_OP) ?
2718 req->cryptlen - m : req->cryptlen, l);
2719 }
2720
2721 static inline int crypto_ccm_check_iv(const u8 *iv)
2722 {
2723 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2724 if (iv[0] < 1 || iv[0] > 7)
2725 return -EINVAL;
2726
2727 return 0;
2728 }
2729
2730 static int ccm_format_packet(struct aead_request *req,
2731 struct chcr_aead_ctx *aeadctx,
2732 unsigned int sub_type,
2733 unsigned short op_type,
2734 unsigned int assoclen)
2735 {
2736 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2737 int rc = 0;
2738
2739 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2740 reqctx->iv[0] = 3;
2741 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2742 memcpy(reqctx->iv + 4, req->iv, 8);
2743 memset(reqctx->iv + 12, 0, 4);
2744 } else {
2745 memcpy(reqctx->iv, req->iv, 16);
2746 }
2747 if (assoclen)
2748 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2749 htons(assoclen);
2750
2751 generate_b0(req, aeadctx, op_type);
2752 /* zero the ctr value */
2753 memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2754 return rc;
2755 }
2756
2757 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2758 unsigned int dst_size,
2759 struct aead_request *req,
2760 unsigned short op_type)
2761 {
2762 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2763 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2764 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2765 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2766 unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2767 unsigned int ccm_xtra;
2768 unsigned int tag_offset = 0, auth_offset = 0;
2769 unsigned int assoclen;
2770
2771 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2772 assoclen = req->assoclen - 8;
2773 else
2774 assoclen = req->assoclen;
2775 ccm_xtra = CCM_B0_SIZE +
2776 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2777
2778 auth_offset = req->cryptlen ?
2779 (assoclen + IV + 1 + ccm_xtra) : 0;
2780 if (op_type == CHCR_DECRYPT_OP) {
2781 if (crypto_aead_authsize(tfm) != req->cryptlen)
2782 tag_offset = crypto_aead_authsize(tfm);
2783 else
2784 auth_offset = 0;
2785 }
2786
2787
2788 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2789 2, assoclen + 1 + ccm_xtra);
2790 sec_cpl->pldlen =
2791 htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2792 /* For CCM there will be b0 always. So AAD start will be 1 always */
2793 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2794 1, assoclen + ccm_xtra, assoclen
2795 + IV + 1 + ccm_xtra, 0);
2796
2797 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2798 auth_offset, tag_offset,
2799 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2800 crypto_aead_authsize(tfm));
2801 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2802 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2803 cipher_mode, mac_mode,
2804 aeadctx->hmac_ctrl, IV >> 1);
2805
2806 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2807 0, dst_size);
2808 }
2809
2810 static int aead_ccm_validate_input(unsigned short op_type,
2811 struct aead_request *req,
2812 struct chcr_aead_ctx *aeadctx,
2813 unsigned int sub_type)
2814 {
2815 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2816 if (crypto_ccm_check_iv(req->iv)) {
2817 pr_err("CCM: IV check fails\n");
2818 return -EINVAL;
2819 }
2820 } else {
2821 if (req->assoclen != 16 && req->assoclen != 20) {
2822 pr_err("RFC4309: Invalid AAD length %d\n",
2823 req->assoclen);
2824 return -EINVAL;
2825 }
2826 }
2827 return 0;
2828 }
2829
2830 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2831 unsigned short qid,
2832 int size)
2833 {
2834 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2835 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2836 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2837 struct sk_buff *skb = NULL;
2838 struct chcr_wr *chcr_req;
2839 struct cpl_rx_phys_dsgl *phys_cpl;
2840 struct ulptx_sgl *ulptx;
2841 unsigned int transhdr_len;
2842 unsigned int dst_size = 0, kctx_len, dnents, temp;
2843 unsigned int sub_type, assoclen = req->assoclen;
2844 unsigned int authsize = crypto_aead_authsize(tfm);
2845 int error = -EINVAL;
2846 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2847 GFP_ATOMIC;
2848 struct adapter *adap = padap(a_ctx(tfm)->dev);
2849
2850 sub_type = get_aead_subtype(tfm);
2851 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2852 assoclen -= 8;
2853 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2854 error = chcr_aead_common_init(req);
2855 if (error)
2856 return ERR_PTR(error);
2857
2858 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2859 if (error)
2860 goto err;
2861 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2862 dnents += sg_nents_xlen(req->dst, req->cryptlen
2863 + (reqctx->op ? -authsize : authsize),
2864 CHCR_DST_SG_SIZE, req->assoclen);
2865 dnents += MIN_CCM_SG; // For IV and B0
2866 dst_size = get_space_for_phys_dsgl(dnents);
2867 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2868 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2869 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2870 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2871 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
2872 reqctx->b0_len, 16) :
2873 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2874 MIN_CCM_SG) * 8);
2875 transhdr_len += temp;
2876 transhdr_len = roundup(transhdr_len, 16);
2877
2878 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2879 reqctx->b0_len, transhdr_len, reqctx->op)) {
2880 atomic_inc(&adap->chcr_stats.fallback);
2881 chcr_aead_common_exit(req);
2882 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2883 }
2884 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2885
2886 if (!skb) {
2887 error = -ENOMEM;
2888 goto err;
2889 }
2890
2891 chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
2892
2893 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2894
2895 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2896 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2897 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2898 aeadctx->key, aeadctx->enckey_len);
2899
2900 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2901 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2902 error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
2903 if (error)
2904 goto dstmap_fail;
2905 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
2906 chcr_add_aead_src_ent(req, ulptx, assoclen);
2907
2908 atomic_inc(&adap->chcr_stats.aead_rqst);
2909 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2910 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2911 reqctx->b0_len) : 0);
2912 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2913 transhdr_len, temp, 0);
2914 reqctx->skb = skb;
2915
2916 return skb;
2917 dstmap_fail:
2918 kfree_skb(skb);
2919 err:
2920 chcr_aead_common_exit(req);
2921 return ERR_PTR(error);
2922 }
2923
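/*
 * Build a GCM work request. The key context carries the AES key followed
 * by the GHASH subkey H, and the 16-byte IV handed to the hardware is
 * salt | IV | 0x00000001 for RFC 4106 or IV | 0x00000001 for plain GCM,
 * matching the initial counter block J0 of NIST SP 800-38D for 96-bit
 * IVs.
 */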
2924 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2925 unsigned short qid,
2926 int size)
2927 {
2928 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2929 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2930 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2931 struct sk_buff *skb = NULL;
2932 struct chcr_wr *chcr_req;
2933 struct cpl_rx_phys_dsgl *phys_cpl;
2934 struct ulptx_sgl *ulptx;
2935 unsigned int transhdr_len, dnents = 0;
2936 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2937 unsigned int authsize = crypto_aead_authsize(tfm);
2938 int error = -EINVAL;
2939 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2940 GFP_ATOMIC;
2941 struct adapter *adap = padap(a_ctx(tfm)->dev);
2942
2943 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2944 assoclen = req->assoclen - 8;
2945
2946 reqctx->b0_len = 0;
2947 error = chcr_aead_common_init(req);
2948 if (error)
2949 return ERR_PTR(error);
2950 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2951 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2952 (reqctx->op ? -authsize : authsize),
2953 CHCR_DST_SG_SIZE, req->assoclen);
2954 dnents += MIN_GCM_SG; // For IV
2955 dst_size = get_space_for_phys_dsgl(dnents);
2956 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
2957 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2958 reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2959 SGE_MAX_WR_LEN;
2960 temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
2961 (sgl_len(reqctx->src_nents +
2962 reqctx->aad_nents + MIN_GCM_SG) * 8);
2963 transhdr_len += temp;
2964 transhdr_len = roundup(transhdr_len, 16);
2965 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2966 transhdr_len, reqctx->op)) {
2967
2968 atomic_inc(&adap->chcr_stats.fallback);
2969 chcr_aead_common_exit(req);
2970 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2971 }
2972 skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2973 if (!skb) {
2974 error = -ENOMEM;
2975 goto err;
2976 }
2977
2978 chcr_req = __skb_put_zero(skb, transhdr_len);
2979
2980 //Offset of tag from end
2981 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2982 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2983 a_ctx(tfm)->dev->rx_channel_id, 2,
2984 (assoclen + 1));
2985 chcr_req->sec_cpl.pldlen =
2986 htonl(assoclen + IV + req->cryptlen);
2987 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2988 assoclen ? 1 : 0, assoclen,
2989 assoclen + IV + 1, 0);
2990 chcr_req->sec_cpl.cipherstop_lo_authinsert =
2991 FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
2992 temp, temp);
2993 chcr_req->sec_cpl.seqno_numivs =
2994 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
2995 CHCR_ENCRYPT_OP) ? 1 : 0,
2996 CHCR_SCMD_CIPHER_MODE_AES_GCM,
2997 CHCR_SCMD_AUTH_MODE_GHASH,
2998 aeadctx->hmac_ctrl, IV >> 1);
2999 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3000 0, 0, dst_size);
3001 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3002 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3003 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3004 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3005
3006 /* prepare a 16 byte iv */
3007 /* S A L T | IV | 0x00000001 */
3008 if (get_aead_subtype(tfm) ==
3009 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3010 memcpy(reqctx->iv, aeadctx->salt, 4);
3011 memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
3012 } else {
3013 memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
3014 }
3015 *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
3016
3017 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3018 ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
3019
3020 chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
3021 chcr_add_aead_src_ent(req, ulptx, assoclen);
3022 atomic_inc(&adap->chcr_stats.aead_rqst);
3023 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
3024 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
3025 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3026 transhdr_len, temp, reqctx->verify);
3027 reqctx->skb = skb;
3028 return skb;
3029
3030 err:
3031 chcr_aead_common_exit(req);
3032 return ERR_PTR(error);
3033 }
3034
3035
3036
3037 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3038 {
3039 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3040 struct aead_alg *alg = crypto_aead_alg(tfm);
3041
3042 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3043 CRYPTO_ALG_NEED_FALLBACK |
3044 CRYPTO_ALG_ASYNC);
3045 if (IS_ERR(aeadctx->sw_cipher))
3046 return PTR_ERR(aeadctx->sw_cipher);
3047 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3048 sizeof(struct aead_request) +
3049 crypto_aead_reqsize(aeadctx->sw_cipher)));
3050 return chcr_device_init(a_ctx(tfm));
3051 }
3052
3053 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3054 {
3055 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3056
3057 crypto_free_aead(aeadctx->sw_cipher);
3058 }
3059
3060 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3061 unsigned int authsize)
3062 {
3063 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3064
3065 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3066 aeadctx->mayverify = VERIFY_HW;
3067 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3068 }
3069 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3070 unsigned int authsize)
3071 {
3072 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3073 u32 maxauth = crypto_aead_maxauthsize(tfm);
3074
3075 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3076 * does not hold for sha1. The authsize == 12 check must therefore come
3077 * before the authsize == (maxauth >> 1) check.
3078 */
3079 if (authsize == ICV_4) {
3080 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3081 aeadctx->mayverify = VERIFY_HW;
3082 } else if (authsize == ICV_6) {
3083 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3084 aeadctx->mayverify = VERIFY_HW;
3085 } else if (authsize == ICV_10) {
3086 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3087 aeadctx->mayverify = VERIFY_HW;
3088 } else if (authsize == ICV_12) {
3089 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3090 aeadctx->mayverify = VERIFY_HW;
3091 } else if (authsize == ICV_14) {
3092 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3093 aeadctx->mayverify = VERIFY_HW;
3094 } else if (authsize == (maxauth >> 1)) {
3095 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3096 aeadctx->mayverify = VERIFY_HW;
3097 } else if (authsize == maxauth) {
3098 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3099 aeadctx->mayverify = VERIFY_HW;
3100 } else {
3101 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3102 aeadctx->mayverify = VERIFY_SW;
3103 }
3104 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3105 }
3106
3107
3108 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3109 {
3110 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3111
3112 switch (authsize) {
3113 case ICV_4:
3114 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3115 aeadctx->mayverify = VERIFY_HW;
3116 break;
3117 case ICV_8:
3118 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3119 aeadctx->mayverify = VERIFY_HW;
3120 break;
3121 case ICV_12:
3122 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3123 aeadctx->mayverify = VERIFY_HW;
3124 break;
3125 case ICV_14:
3126 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3127 aeadctx->mayverify = VERIFY_HW;
3128 break;
3129 case ICV_16:
3130 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3131 aeadctx->mayverify = VERIFY_HW;
3132 break;
3133 case ICV_13:
3134 case ICV_15:
3135 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3136 aeadctx->mayverify = VERIFY_SW;
3137 break;
3138 default:
3139 return -EINVAL;
3140 }
3141 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3142 }
3143
3144 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3145 unsigned int authsize)
3146 {
3147 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3148
3149 switch (authsize) {
3150 case ICV_8:
3151 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3152 aeadctx->mayverify = VERIFY_HW;
3153 break;
3154 case ICV_12:
3155 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3156 aeadctx->mayverify = VERIFY_HW;
3157 break;
3158 case ICV_16:
3159 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160 aeadctx->mayverify = VERIFY_HW;
3161 break;
3162 default:
3163 return -EINVAL;
3164 }
3165 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3166 }
3167
3168 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3169 unsigned int authsize)
3170 {
3171 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3172
3173 switch (authsize) {
3174 case ICV_4:
3175 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3176 aeadctx->mayverify = VERIFY_HW;
3177 break;
3178 case ICV_6:
3179 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3180 aeadctx->mayverify = VERIFY_HW;
3181 break;
3182 case ICV_8:
3183 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3184 aeadctx->mayverify = VERIFY_HW;
3185 break;
3186 case ICV_10:
3187 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3188 aeadctx->mayverify = VERIFY_HW;
3189 break;
3190 case ICV_12:
3191 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3192 aeadctx->mayverify = VERIFY_HW;
3193 break;
3194 case ICV_14:
3195 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3196 aeadctx->mayverify = VERIFY_HW;
3197 break;
3198 case ICV_16:
3199 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3200 aeadctx->mayverify = VERIFY_HW;
3201 break;
3202 default:
3203 return -EINVAL;
3204 }
3205 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3206 }
3207
3208 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3209 const u8 *key,
3210 unsigned int keylen)
3211 {
3212 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3213 unsigned char ck_size, mk_size;
3214 int key_ctx_size = 0;
3215
3216 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3217 if (keylen == AES_KEYSIZE_128) {
3218 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3219 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3220 } else if (keylen == AES_KEYSIZE_192) {
3221 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3222 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3223 } else if (keylen == AES_KEYSIZE_256) {
3224 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3225 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3226 } else {
3227 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
3228 aeadctx->enckey_len = 0;
3229 return -EINVAL;
3230 }
3231 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3232 key_ctx_size >> 4);
3233 memcpy(aeadctx->key, key, keylen);
3234 aeadctx->enckey_len = keylen;
3235
3236 return 0;
3237 }
3238
3239 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3240 const u8 *key,
3241 unsigned int keylen)
3242 {
3243 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3244 int error;
3245
3246 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3247 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3248 CRYPTO_TFM_REQ_MASK);
3249 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3250 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3251 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3252 CRYPTO_TFM_RES_MASK);
3253 if (error)
3254 return error;
3255 return chcr_ccm_common_setkey(aead, key, keylen);
3256 }
3257
3258 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3259 unsigned int keylen)
3260 {
3261 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3262 int error;
3263
3264 if (keylen < 3) {
3265 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
3266 aeadctx->enckey_len = 0;
3267 return -EINVAL;
3268 }
3269 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3270 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3271 CRYPTO_TFM_REQ_MASK);
3272 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3273 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3274 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3275 CRYPTO_TFM_RES_MASK);
3276 if (error)
3277 return error;
3278 keylen -= 3;
3279 memcpy(aeadctx->salt, key + keylen, 3);
3280 return chcr_ccm_common_setkey(aead, key, keylen);
3281 }
3282
3283 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3284 unsigned int keylen)
3285 {
3286 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3287 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3288 struct crypto_cipher *cipher;
3289 unsigned int ck_size;
3290 int ret = 0, key_ctx_size = 0;
3291
3292 aeadctx->enckey_len = 0;
3293 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3294 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3295 & CRYPTO_TFM_REQ_MASK);
3296 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3297 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3298 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3299 CRYPTO_TFM_RES_MASK);
3300 if (ret)
3301 goto out;
3302
3303 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3304 keylen > 3) {
3305 keylen -= 4; /* nonce/salt is present in the last 4 bytes */
3306 memcpy(aeadctx->salt, key + keylen, 4);
3307 }
3308 if (keylen == AES_KEYSIZE_128) {
3309 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3310 } else if (keylen == AES_KEYSIZE_192) {
3311 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3312 } else if (keylen == AES_KEYSIZE_256) {
3313 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3314 } else {
3315 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
3316 pr_err("GCM: Invalid key length %d\n", keylen);
3317 ret = -EINVAL;
3318 goto out;
3319 }
3320
3321 memcpy(aeadctx->key, key, keylen);
3322 aeadctx->enckey_len = keylen;
3323 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3324 AEAD_H_SIZE;
3325 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3326 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3327 0, 0,
3328 key_ctx_size >> 4);
3329 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3330 * It will go in key context
3331 */
3332 cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3333 if (IS_ERR(cipher)) {
3334 aeadctx->enckey_len = 0;
3335 ret = -ENOMEM;
3336 goto out;
3337 }
3338
3339 ret = crypto_cipher_setkey(cipher, key, keylen);
3340 if (ret) {
3341 aeadctx->enckey_len = 0;
3342 goto out1;
3343 }
3344 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3345 crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3346
3347 out1:
3348 crypto_free_cipher(cipher);
3349 out:
3350 return ret;
3351 }
3352
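/*
 * authenc setkey: split the combined authenc() key blob into cipher and
 * auth keys, program the cipher key into the hardware key context, and
 * precompute the HMAC inner/outer partial digests (H(K ^ ipad),
 * H(K ^ opad)) with a software shash so the hardware only has to resume
 * the hash over the actual payload.
 */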
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key. The auth key is used just to
	 * generate h(ipad) and h(opad), so it is not needed again;
	 * authkeylen holds the hash digest size.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Digest computation failed\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digests to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}

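/*
 * digest_null setkey: same key handling as chcr_authenc_setkey(), but
 * there is no MAC key material, so no ipad/opad precompute; the key
 * context advertises no MAC key and the auth mode is programmed as a
 * NOP.
 */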
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* it contains both the auth and cipher keys */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %u\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

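/*
 * Common submission path for all AEAD requests: build a firmware work
 * request via the supplied builder and post it, honouring queue-full
 * backpressure. If the TX queue is full and the caller allows
 * backlogging, the request is still submitted and -EBUSY is returned;
 * otherwise -ENOSPC.
 */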
static int chcr_aead_op(struct aead_request *req,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);

	/* Map a NULL skb to -ENOMEM rather than PTR_ERR(NULL), which
	 * would read as success even though no WR was sent.
	 */
	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : -ENOMEM;

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

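/*
 * Encrypt entry point: the tag is always produced (and thus "verified")
 * by hardware on the encrypt side; dispatch to the authenc, CCM or GCM
 * work-request builder based on the algorithm subtype.
 */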
static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;
	reqctx->op = CHCR_ENCRYPT_OP;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, 0, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, 0, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, 0, create_gcm_wr);
	}
}

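/*
 * Decrypt entry point: when the tag must be checked in software
 * (aeadctx->mayverify == VERIFY_SW), pass maxauthsize down to the
 * work-request builder so the computed tag comes back for a software
 * compare; otherwise the hardware verifies the tag itself.
 */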
static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}
	reqctx->op = CHCR_DECRYPT_OP;
	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, size, create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, size, create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, size, create_gcm_wr);
	}
}

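/*
 * Template table of every algorithm this driver offers. Fields that are
 * identical across entries of a type (cra_flags, cra_module, the
 * encrypt/decrypt/init callbacks, ...) are filled in at registration
 * time by chcr_register_alg(); is_registered tracks per-entry state so
 * unregistration only touches algorithms that actually registered.
 */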
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = 2 * AES_MIN_KEY_SIZE,
				.max_keysize = 2 * AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_xts_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_ctr_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_rfc3686_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize = CTR_RFC3686_IV_SIZE,
				.setkey = chcr_aes_rfc3686_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
				.geniv = "seqiv",
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};

/*
 *	chcr_unregister_alg - Deregister crypto algorithms with the
 *	kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}

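/* Context sizes advertised to the crypto API: plain hashes need only the
 * base chcr_context, HMAC transforms additionally carry ipad/opad state
 * in struct hmac_ctx, and the request context is also used as the
 * export/import state size.
 */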
#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)

/*
 *	chcr_register_alg - Register crypto algorithms with the kernel
 *	framework.
 */
static int chcr_register_alg(void)
{
	struct crypto_alg ai;
	struct ahash_alg *a_hash;
	int err = 0, i;
	char *name = NULL;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (driver_algs[i].is_registered)
			continue;
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			driver_algs[i].alg.crypto.cra_priority =
				CHCR_CRA_PRIORITY;
			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
			driver_algs[i].alg.crypto.cra_flags =
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.crypto.cra_ctxsize =
				sizeof(struct chcr_context) +
				sizeof(struct ablk_ctx);
			driver_algs[i].alg.crypto.cra_alignmask = 0;
			driver_algs[i].alg.crypto.cra_type =
				&crypto_ablkcipher_type;
			err = crypto_register_alg(&driver_algs[i].alg.crypto);
			name = driver_algs[i].alg.crypto.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			driver_algs[i].alg.aead.base.cra_flags =
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
			err = crypto_register_aead(&driver_algs[i].alg.aead);
			name = driver_algs[i].alg.aead.base.cra_driver_name;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			a_hash = &driver_algs[i].alg.hash;
			a_hash->update = chcr_ahash_update;
			a_hash->final = chcr_ahash_final;
			a_hash->finup = chcr_ahash_finup;
			a_hash->digest = chcr_ahash_digest;
			a_hash->export = chcr_ahash_export;
			a_hash->import = chcr_ahash_import;
			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
			a_hash->halg.base.cra_module = THIS_MODULE;
			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
			a_hash->halg.base.cra_alignmask = 0;
			a_hash->halg.base.cra_exit = NULL;

			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
				a_hash->init = chcr_hmac_init;
				a_hash->setkey = chcr_ahash_setkey;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
			} else {
				a_hash->init = chcr_sha_init;
				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
				a_hash->halg.base.cra_init = chcr_sha_cra_init;
			}
			err = crypto_register_ahash(&driver_algs[i].alg.hash);
			ai = driver_algs[i].alg.hash.halg.base;
			name = ai.cra_driver_name;
			break;
		}
		if (err) {
			pr_err("chcr : %s : Algorithm registration failed\n",
			       name);
			goto register_err;
		} else {
			driver_algs[i].is_registered = 1;
		}
	}
	return 0;

register_err:
	chcr_unregister_alg();
	return err;
}


/*
 *	start_crypto - Register the crypto algorithms.
 *	This should be called once, when the first device comes up. After
 *	this the kernel will start calling driver APIs for crypto
 *	operations.
 */
int start_crypto(void)
{
	return chcr_register_alg();
}

/*
 *	stop_crypto - Deregister all the crypto algorithms with the kernel.
 *	This should be called once, when the last device goes down. After
 *	this the kernel will not call the driver APIs for crypto
 *	operations.
 */
int stop_crypto(void)
{
	chcr_unregister_alg();
	return 0;
}