
Lines Matching +full:sha +full:- +full:1

1 // SPDX-License-Identifier: GPL-2.0-only
9 * Some ideas are from atmel-sha.c and omap-sham.c drivers.
13 #include <crypto/sha.h>
14 #include "mtk-platform.h"
16 #define SHA_ALIGN_MSK (sizeof(u32) - 1)
20 #define SHA_OP_UPDATE 1
26 /* SHA command token */
33 /* SHA transform information */
46 /* SHA flags */
48 #define SHA_FLAGS_FINAL BIT(1)
61 * mtk_sha_info - hardware information of SHA
129 return readl_relaxed(cryp->base + offset); in mtk_sha_read()
135 writel_relaxed(value, cryp->base + offset); in mtk_sha_write()
143 *cmd_curr = ring->cmd_next++; in mtk_sha_ring_shift()
144 *res_curr = ring->res_next++; in mtk_sha_ring_shift()
147 if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) { in mtk_sha_ring_shift()
148 ring->cmd_next = ring->cmd_base; in mtk_sha_ring_shift()
149 ring->res_next = ring->res_base; in mtk_sha_ring_shift()
159 if (!tctx->cryp) { in mtk_sha_find_dev()
164 tctx->cryp = cryp; in mtk_sha_find_dev()
166 cryp = tctx->cryp; in mtk_sha_find_dev()
170 * Assign record id to tfm in round-robin fashion, and this in mtk_sha_find_dev()
173 tctx->id = cryp->rec; in mtk_sha_find_dev()
174 cryp->rec = !cryp->rec; in mtk_sha_find_dev()
185 while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) { in mtk_sha_append_sg()
186 count = min(ctx->sg->length - ctx->offset, ctx->total); in mtk_sha_append_sg()
187 count = min(count, SHA_BUF_SIZE - ctx->bufcnt); in mtk_sha_append_sg()
196 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { in mtk_sha_append_sg()
197 ctx->sg = sg_next(ctx->sg); in mtk_sha_append_sg()
204 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, in mtk_sha_append_sg()
205 ctx->offset, count, 0); in mtk_sha_append_sg()
207 ctx->bufcnt += count; in mtk_sha_append_sg()
208 ctx->offset += count; in mtk_sha_append_sg()
209 ctx->total -= count; in mtk_sha_append_sg()
211 if (ctx->offset == ctx->sg->length) { in mtk_sha_append_sg()
212 ctx->sg = sg_next(ctx->sg); in mtk_sha_append_sg()
213 if (ctx->sg) in mtk_sha_append_sg()
214 ctx->offset = 0; in mtk_sha_append_sg()
216 ctx->total = 0; in mtk_sha_append_sg()
226 * The bit "1" is appended at the end of the message followed by
227 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
232 * - if message length < 56 bytes then padlen = 56 - message length
233 * - else padlen = 64 + 56 - message length
236 * - if message length < 112 bytes then padlen = 112 - message length
237 * - else padlen = 128 + 112 - message length
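The rule above can be checked with a short standalone sketch (userspace C, not part of the driver; the helper names are illustrative only). It mirrors the index/padlen computation that mtk_sha_fill_padding() performs on ctx->bufcnt just below:

    #include <stdio.h>
    #include <stddef.h>

    /* padlen counts the 0x80 marker byte plus the zero bytes; the 8- or
     * 16-byte length field is appended separately, as in the driver.
     */
    static size_t padlen_sha256(size_t buffered)
    {
            size_t index = buffered & 0x3f;         /* buffered mod 64 */

            return (index < 56) ? 56 - index : (64 + 56) - index;
    }

    static size_t padlen_sha512(size_t buffered)
    {
            size_t index = buffered & 0x7f;         /* buffered mod 128 */

            return (index < 112) ? 112 - index : (128 + 112) - index;
    }

    int main(void)
    {
            printf("%zu\n", padlen_sha256(55));     /* 1: only the 0x80 byte is needed */
            printf("%zu\n", padlen_sha256(56));     /* 64: padding spills into the next block */
            printf("%zu\n", padlen_sha512(111));    /* 1 */
            printf("%zu\n", padlen_sha512(112));    /* 128 */
            return 0;
    }

With 55 bytes buffered, 55 + 1 + 8 = 64 bytes fill exactly one SHA-256 block; with 56 bytes buffered the length field no longer fits, so the padding extends into a second block (56 + 64 + 8 = 128 bytes).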
243 u64 size = ctx->digcnt; in mtk_sha_fill_padding()
245 size += ctx->bufcnt; in mtk_sha_fill_padding()
248 bits[1] = cpu_to_be64(size << 3); in mtk_sha_fill_padding()
251 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { in mtk_sha_fill_padding()
254 index = ctx->bufcnt & 0x7f; in mtk_sha_fill_padding()
255 padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); in mtk_sha_fill_padding()
256 *(ctx->buffer + ctx->bufcnt) = 0x80; in mtk_sha_fill_padding()
257 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); in mtk_sha_fill_padding()
258 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in mtk_sha_fill_padding()
259 ctx->bufcnt += padlen + 16; in mtk_sha_fill_padding()
260 ctx->flags |= SHA_FLAGS_PAD; in mtk_sha_fill_padding()
264 index = ctx->bufcnt & 0x3f; in mtk_sha_fill_padding()
265 padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); in mtk_sha_fill_padding()
266 *(ctx->buffer + ctx->bufcnt) = 0x80; in mtk_sha_fill_padding()
267 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); in mtk_sha_fill_padding()
268 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in mtk_sha_fill_padding()
269 ctx->bufcnt += padlen + 8; in mtk_sha_fill_padding()
270 ctx->flags |= SHA_FLAGS_PAD; in mtk_sha_fill_padding()
275 /* Initialize basic transform information of SHA */
278 struct mtk_sha_info *info = &ctx->info; in mtk_sha_info_init()
280 ctx->ct_hdr = SHA_CT_CTRL_HDR; in mtk_sha_info_init()
281 ctx->ct_size = SHA_CT_SIZE; in mtk_sha_info_init()
283 info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds)); in mtk_sha_info_init()
285 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { in mtk_sha_info_init()
287 info->tfm[0] |= SHA_TFM_SHA1; in mtk_sha_info_init()
290 info->tfm[0] |= SHA_TFM_SHA224; in mtk_sha_info_init()
293 info->tfm[0] |= SHA_TFM_SHA256; in mtk_sha_info_init()
296 info->tfm[0] |= SHA_TFM_SHA384; in mtk_sha_info_init()
299 info->tfm[0] |= SHA_TFM_SHA512; in mtk_sha_info_init()
307 info->tfm[1] = SHA_TFM_HASH_STORE; in mtk_sha_info_init()
308 info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START; in mtk_sha_info_init()
309 info->ctrl[1] = info->tfm[1]; in mtk_sha_info_init()
311 info->cmd[0] = SHA_CMD0; in mtk_sha_info_init()
312 info->cmd[1] = SHA_CMD1; in mtk_sha_info_init()
313 info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); in mtk_sha_info_init()
321 struct mtk_sha_rec *sha, in mtk_sha_info_update() argument
324 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_info_update()
325 struct mtk_sha_info *info = &ctx->info; in mtk_sha_info_update()
327 ctx->ct_hdr &= ~SHA_DATA_LEN_MSK; in mtk_sha_info_update()
328 ctx->ct_hdr |= cpu_to_le32(len1 + len2); in mtk_sha_info_update()
329 info->cmd[0] &= ~SHA_DATA_LEN_MSK; in mtk_sha_info_update()
330 info->cmd[0] |= cpu_to_le32(len1 + len2); in mtk_sha_info_update()
333 if (ctx->digcnt) in mtk_sha_info_update()
334 info->ctrl[0] &= ~SHA_TFM_START; in mtk_sha_info_update()
336 ctx->digcnt += len1; in mtk_sha_info_update()
338 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), in mtk_sha_info_update()
340 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) { in mtk_sha_info_update()
341 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); in mtk_sha_info_update()
342 return -EINVAL; in mtk_sha_info_update()
345 ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd); in mtk_sha_info_update()
351 * Because of hardware limitation, we must pre-calculate the inner
358 struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in mtk_sha_finish_hmac()
359 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_finish_hmac()
362 SHASH_DESC_ON_STACK(shash, bctx->shash); in mtk_sha_finish_hmac()
364 shash->tfm = bctx->shash; in mtk_sha_finish_hmac()
367 crypto_shash_update(shash, bctx->opad, ctx->bs) ?: in mtk_sha_finish_hmac()
368 crypto_shash_finup(shash, req->result, ctx->ds, req->result); in mtk_sha_finish_hmac()
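For reference, the split implemented above is the standard HMAC construction HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)): mtk_sha_init() preloads ctx->buffer with bctx->ipad so the engine produces the inner digest over (ipad block || message), and the shash fallback here finishes the outer hash over (opad block || inner digest). The block-sized ipad/opad values are built in mtk_sha_setkey() by zero-padding (or pre-hashing) the key and XORing it with HMAC_IPAD_VALUE and HMAC_OPAD_VALUE.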
378 ctx->flags = 0; in mtk_sha_init()
379 ctx->ds = crypto_ahash_digestsize(tfm); in mtk_sha_init()
381 switch (ctx->ds) { in mtk_sha_init()
383 ctx->flags |= SHA_FLAGS_SHA1; in mtk_sha_init()
384 ctx->bs = SHA1_BLOCK_SIZE; in mtk_sha_init()
387 ctx->flags |= SHA_FLAGS_SHA224; in mtk_sha_init()
388 ctx->bs = SHA224_BLOCK_SIZE; in mtk_sha_init()
391 ctx->flags |= SHA_FLAGS_SHA256; in mtk_sha_init()
392 ctx->bs = SHA256_BLOCK_SIZE; in mtk_sha_init()
395 ctx->flags |= SHA_FLAGS_SHA384; in mtk_sha_init()
396 ctx->bs = SHA384_BLOCK_SIZE; in mtk_sha_init()
399 ctx->flags |= SHA_FLAGS_SHA512; in mtk_sha_init()
400 ctx->bs = SHA512_BLOCK_SIZE; in mtk_sha_init()
403 return -EINVAL; in mtk_sha_init()
406 ctx->bufcnt = 0; in mtk_sha_init()
407 ctx->digcnt = 0; in mtk_sha_init()
408 ctx->buffer = tctx->buf; in mtk_sha_init()
410 if (tctx->flags & SHA_FLAGS_HMAC) { in mtk_sha_init()
411 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_init()
413 memcpy(ctx->buffer, bctx->ipad, ctx->bs); in mtk_sha_init()
414 ctx->bufcnt = ctx->bs; in mtk_sha_init()
415 ctx->flags |= SHA_FLAGS_HMAC; in mtk_sha_init()
421 static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha, in mtk_sha_xmit() argument
425 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_xmit()
426 struct mtk_ring *ring = cryp->ring[sha->id]; in mtk_sha_xmit()
430 err = mtk_sha_info_update(cryp, sha, len1, len2); in mtk_sha_xmit()
437 res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1); in mtk_sha_xmit()
438 cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) | in mtk_sha_xmit()
439 MTK_DESC_CT_LEN(ctx->ct_size); in mtk_sha_xmit()
440 cmd->buf = cpu_to_le32(addr1); in mtk_sha_xmit()
441 cmd->ct = cpu_to_le32(ctx->ct_dma); in mtk_sha_xmit()
442 cmd->ct_hdr = ctx->ct_hdr; in mtk_sha_xmit()
443 cmd->tfm = cpu_to_le32(ctx->tfm_dma); in mtk_sha_xmit()
448 res->hdr = MTK_DESC_BUF_LEN(len2); in mtk_sha_xmit()
449 cmd->hdr = MTK_DESC_BUF_LEN(len2); in mtk_sha_xmit()
450 cmd->buf = cpu_to_le32(addr2); in mtk_sha_xmit()
453 cmd->hdr |= MTK_DESC_LAST; in mtk_sha_xmit()
454 res->hdr |= MTK_DESC_LAST; in mtk_sha_xmit()
462 mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); in mtk_sha_xmit()
463 mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); in mtk_sha_xmit()
465 return -EINPROGRESS; in mtk_sha_xmit()
469 struct mtk_sha_rec *sha, in mtk_sha_dma_map() argument
473 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, in mtk_sha_dma_map()
475 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { in mtk_sha_dma_map()
476 dev_err(cryp->dev, "dma map error\n"); in mtk_sha_dma_map()
477 return -EINVAL; in mtk_sha_dma_map()
480 ctx->flags &= ~SHA_FLAGS_SG; in mtk_sha_dma_map()
482 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0); in mtk_sha_dma_map()
486 struct mtk_sha_rec *sha) in mtk_sha_update_slow() argument
488 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_update_slow()
494 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in mtk_sha_update_slow()
496 dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt); in mtk_sha_update_slow()
499 sha->flags |= SHA_FLAGS_FINAL; in mtk_sha_update_slow()
503 if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) { in mtk_sha_update_slow()
504 count = ctx->bufcnt; in mtk_sha_update_slow()
505 ctx->bufcnt = 0; in mtk_sha_update_slow()
507 return mtk_sha_dma_map(cryp, sha, ctx, count); in mtk_sha_update_slow()
513 struct mtk_sha_rec *sha) in mtk_sha_update_start() argument
515 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_update_start()
519 if (!ctx->total) in mtk_sha_update_start()
522 if (ctx->bufcnt || ctx->offset) in mtk_sha_update_start()
523 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
525 sg = ctx->sg; in mtk_sha_update_start()
527 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in mtk_sha_update_start()
528 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
530 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs)) in mtk_sha_update_start()
531 /* size is not ctx->bs aligned */ in mtk_sha_update_start()
532 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
534 len = min(ctx->total, sg->length); in mtk_sha_update_start()
537 if (!(ctx->flags & SHA_FLAGS_FINUP)) { in mtk_sha_update_start()
538 /* not last sg must be ctx->bs aligned */ in mtk_sha_update_start()
539 tail = len & (ctx->bs - 1); in mtk_sha_update_start()
540 len -= tail; in mtk_sha_update_start()
544 ctx->total -= len; in mtk_sha_update_start()
545 ctx->offset = len; /* offset where to start slow */ in mtk_sha_update_start()
547 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in mtk_sha_update_start()
553 tail = len & (ctx->bs - 1); in mtk_sha_update_start()
554 len -= tail; in mtk_sha_update_start()
555 ctx->total += tail; in mtk_sha_update_start()
556 ctx->offset = len; /* offset where to start slow */ in mtk_sha_update_start()
558 sg = ctx->sg; in mtk_sha_update_start()
562 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, in mtk_sha_update_start()
564 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { in mtk_sha_update_start()
565 dev_err(cryp->dev, "dma map bytes error\n"); in mtk_sha_update_start()
566 return -EINVAL; in mtk_sha_update_start()
569 sha->flags |= SHA_FLAGS_FINAL; in mtk_sha_update_start()
570 count = ctx->bufcnt; in mtk_sha_update_start()
571 ctx->bufcnt = 0; in mtk_sha_update_start()
574 ctx->flags &= ~SHA_FLAGS_SG; in mtk_sha_update_start()
575 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, in mtk_sha_update_start()
579 ctx->sg = sg; in mtk_sha_update_start()
580 if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in mtk_sha_update_start()
581 dev_err(cryp->dev, "dma_map_sg error\n"); in mtk_sha_update_start()
582 return -EINVAL; in mtk_sha_update_start()
585 ctx->flags |= SHA_FLAGS_SG; in mtk_sha_update_start()
586 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), in mtk_sha_update_start()
587 len, ctx->dma_addr, count); in mtk_sha_update_start()
591 if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in mtk_sha_update_start()
592 dev_err(cryp->dev, "dma_map_sg error\n"); in mtk_sha_update_start()
593 return -EINVAL; in mtk_sha_update_start()
596 ctx->flags |= SHA_FLAGS_SG; in mtk_sha_update_start()
598 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), in mtk_sha_update_start()
603 struct mtk_sha_rec *sha) in mtk_sha_final_req() argument
605 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_final_req()
610 sha->flags |= SHA_FLAGS_FINAL; in mtk_sha_final_req()
611 count = ctx->bufcnt; in mtk_sha_final_req()
612 ctx->bufcnt = 0; in mtk_sha_final_req()
614 return mtk_sha_dma_map(cryp, sha, ctx, count); in mtk_sha_final_req()
621 __le32 *digest = ctx->info.digest; in mtk_sha_finish()
622 u32 *result = (u32 *)req->result; in mtk_sha_finish()
626 for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++) in mtk_sha_finish()
629 if (ctx->flags & SHA_FLAGS_HMAC) in mtk_sha_finish()
636 struct mtk_sha_rec *sha, in mtk_sha_finish_req() argument
639 if (likely(!err && (SHA_FLAGS_FINAL & sha->flags))) in mtk_sha_finish_req()
640 err = mtk_sha_finish(sha->req); in mtk_sha_finish_req()
642 sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL); in mtk_sha_finish_req()
644 sha->req->base.complete(&sha->req->base, err); in mtk_sha_finish_req()
647 tasklet_schedule(&sha->queue_task); in mtk_sha_finish_req()
653 struct mtk_sha_rec *sha = cryp->sha[id]; in mtk_sha_handle_queue() local
659 spin_lock_irqsave(&sha->lock, flags); in mtk_sha_handle_queue()
661 ret = ahash_enqueue_request(&sha->queue, req); in mtk_sha_handle_queue()
663 if (SHA_FLAGS_BUSY & sha->flags) { in mtk_sha_handle_queue()
664 spin_unlock_irqrestore(&sha->lock, flags); in mtk_sha_handle_queue()
668 backlog = crypto_get_backlog(&sha->queue); in mtk_sha_handle_queue()
669 async_req = crypto_dequeue_request(&sha->queue); in mtk_sha_handle_queue()
671 sha->flags |= SHA_FLAGS_BUSY; in mtk_sha_handle_queue()
672 spin_unlock_irqrestore(&sha->lock, flags); in mtk_sha_handle_queue()
678 backlog->complete(backlog, -EINPROGRESS); in mtk_sha_handle_queue()
683 sha->req = req; in mtk_sha_handle_queue()
687 if (ctx->op == SHA_OP_UPDATE) { in mtk_sha_handle_queue()
688 err = mtk_sha_update_start(cryp, sha); in mtk_sha_handle_queue()
689 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) in mtk_sha_handle_queue()
691 err = mtk_sha_final_req(cryp, sha); in mtk_sha_handle_queue()
692 } else if (ctx->op == SHA_OP_FINAL) { in mtk_sha_handle_queue()
693 err = mtk_sha_final_req(cryp, sha); in mtk_sha_handle_queue()
696 if (unlikely(err != -EINPROGRESS)) in mtk_sha_handle_queue()
698 mtk_sha_finish_req(cryp, sha, err); in mtk_sha_handle_queue()
706 struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in mtk_sha_enqueue()
708 ctx->op = op; in mtk_sha_enqueue()
710 return mtk_sha_handle_queue(tctx->cryp, tctx->id, req); in mtk_sha_enqueue()
713 static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha) in mtk_sha_unmap() argument
715 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_unmap()
717 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), in mtk_sha_unmap()
720 if (ctx->flags & SHA_FLAGS_SG) { in mtk_sha_unmap()
721 dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE); in mtk_sha_unmap()
722 if (ctx->sg->length == ctx->offset) { in mtk_sha_unmap()
723 ctx->sg = sg_next(ctx->sg); in mtk_sha_unmap()
724 if (ctx->sg) in mtk_sha_unmap()
725 ctx->offset = 0; in mtk_sha_unmap()
727 if (ctx->flags & SHA_FLAGS_PAD) { in mtk_sha_unmap()
728 dma_unmap_single(cryp->dev, ctx->dma_addr, in mtk_sha_unmap()
732 dma_unmap_single(cryp->dev, ctx->dma_addr, in mtk_sha_unmap()
737 struct mtk_sha_rec *sha) in mtk_sha_complete() argument
741 err = mtk_sha_update_start(cryp, sha); in mtk_sha_complete()
742 if (err != -EINPROGRESS) in mtk_sha_complete()
743 mtk_sha_finish_req(cryp, sha, err); in mtk_sha_complete()
750 ctx->total = req->nbytes; in mtk_sha_update()
751 ctx->sg = req->src; in mtk_sha_update()
752 ctx->offset = 0; in mtk_sha_update()
754 if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) && in mtk_sha_update()
755 !(ctx->flags & SHA_FLAGS_FINUP)) in mtk_sha_update()
765 ctx->flags |= SHA_FLAGS_FINUP; in mtk_sha_final()
767 if (ctx->flags & SHA_FLAGS_PAD) in mtk_sha_final()
778 ctx->flags |= SHA_FLAGS_FINUP; in mtk_sha_finup()
781 if (err1 == -EINPROGRESS || in mtk_sha_finup()
782 (err1 == -EBUSY && (ahash_request_flags(req) & in mtk_sha_finup()
803 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_setkey()
804 size_t bs = crypto_shash_blocksize(bctx->shash); in mtk_sha_setkey()
805 size_t ds = crypto_shash_digestsize(bctx->shash); in mtk_sha_setkey()
809 err = crypto_shash_tfm_digest(bctx->shash, key, keylen, in mtk_sha_setkey()
810 bctx->ipad); in mtk_sha_setkey()
815 memcpy(bctx->ipad, key, keylen); in mtk_sha_setkey()
818 memset(bctx->ipad + keylen, 0, bs - keylen); in mtk_sha_setkey()
819 memcpy(bctx->opad, bctx->ipad, bs); in mtk_sha_setkey()
822 bctx->ipad[i] ^= HMAC_IPAD_VALUE; in mtk_sha_setkey()
823 bctx->opad[i] ^= HMAC_OPAD_VALUE; in mtk_sha_setkey()
853 return -ENODEV; in mtk_sha_cra_init_alg()
859 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_cra_init_alg()
861 tctx->flags |= SHA_FLAGS_HMAC; in mtk_sha_cra_init_alg()
862 bctx->shash = crypto_alloc_shash(alg_base, 0, in mtk_sha_cra_init_alg()
864 if (IS_ERR(bctx->shash)) { in mtk_sha_cra_init_alg()
868 return PTR_ERR(bctx->shash); in mtk_sha_cra_init_alg()
908 if (tctx->flags & SHA_FLAGS_HMAC) { in mtk_sha_cra_exit()
909 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_cra_exit()
911 crypto_free_shash(bctx->shash); in mtk_sha_cra_exit()
928 .cra_driver_name = "mtk-sha1",
951 .cra_driver_name = "mtk-sha224",
974 .cra_driver_name = "mtk-sha256",
998 .cra_driver_name = "mtk-hmac-sha1",
1024 .cra_driver_name = "mtk-hmac-sha224",
1050 .cra_driver_name = "mtk-hmac-sha256",
1078 .cra_driver_name = "mtk-sha384",
1101 .cra_driver_name = "mtk-sha512",
1125 .cra_driver_name = "mtk-hmac-sha384",
1151 .cra_driver_name = "mtk-hmac-sha512",
1168 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; in mtk_sha_queue_task() local
1170 mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL); in mtk_sha_queue_task()
1175 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; in mtk_sha_done_task() local
1176 struct mtk_cryp *cryp = sha->cryp; in mtk_sha_done_task()
1178 mtk_sha_unmap(cryp, sha); in mtk_sha_done_task()
1179 mtk_sha_complete(cryp, sha); in mtk_sha_done_task()
1184 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id; in mtk_sha_irq() local
1185 struct mtk_cryp *cryp = sha->cryp; in mtk_sha_irq()
1186 u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id)); in mtk_sha_irq()
1188 mtk_sha_write(cryp, RDR_STAT(sha->id), val); in mtk_sha_irq()
1190 if (likely((SHA_FLAGS_BUSY & sha->flags))) { in mtk_sha_irq()
1191 mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST); in mtk_sha_irq()
1192 mtk_sha_write(cryp, RDR_THRESH(sha->id), in mtk_sha_irq()
1195 tasklet_schedule(&sha->done_task); in mtk_sha_irq()
1197 dev_warn(cryp->dev, "SHA interrupt when no active requests.\n"); in mtk_sha_irq()
1203 * Two SHA records are used to get extra performance.
1208 struct mtk_sha_rec **sha = cryp->sha; in mtk_sha_record_init() local
1209 int i, err = -ENOMEM; in mtk_sha_record_init()
1212 sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL); in mtk_sha_record_init()
1213 if (!sha[i]) in mtk_sha_record_init()
1216 sha[i]->cryp = cryp; in mtk_sha_record_init()
1218 spin_lock_init(&sha[i]->lock); in mtk_sha_record_init()
1219 crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE); in mtk_sha_record_init()
1221 tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task, in mtk_sha_record_init()
1222 (unsigned long)sha[i]); in mtk_sha_record_init()
1223 tasklet_init(&sha[i]->done_task, mtk_sha_done_task, in mtk_sha_record_init()
1224 (unsigned long)sha[i]); in mtk_sha_record_init()
1228 sha[0]->id = MTK_RING2; in mtk_sha_record_init()
1229 sha[1]->id = MTK_RING3; in mtk_sha_record_init()
1231 cryp->rec = 1; in mtk_sha_record_init()
1236 for (; i--; ) in mtk_sha_record_init()
1237 kfree(sha[i]); in mtk_sha_record_init()
1246 tasklet_kill(&cryp->sha[i]->done_task); in mtk_sha_record_free()
1247 tasklet_kill(&cryp->sha[i]->queue_task); in mtk_sha_record_free()
1249 kfree(cryp->sha[i]); in mtk_sha_record_free()
1283 for (; i--; ) in mtk_sha_register_algs()
1287 for (; i--; ) in mtk_sha_register_algs()
1297 INIT_LIST_HEAD(&cryp->sha_list); in mtk_hash_alg_register()
1304 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq, in mtk_hash_alg_register()
1305 0, "mtk-sha", cryp->sha[0]); in mtk_hash_alg_register()
1307 dev_err(cryp->dev, "unable to request sha irq0.\n"); in mtk_hash_alg_register()
1311 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq, in mtk_hash_alg_register()
1312 0, "mtk-sha", cryp->sha[1]); in mtk_hash_alg_register()
1314 dev_err(cryp->dev, "unable to request sha irq1.\n"); in mtk_hash_alg_register()
1323 list_add_tail(&cryp->sha_list, &mtk_sha.dev_list); in mtk_hash_alg_register()
1334 list_del(&cryp->sha_list); in mtk_hash_alg_register()
1340 dev_err(cryp->dev, "mtk-sha initialization failed.\n"); in mtk_hash_alg_register()
1347 list_del(&cryp->sha_list); in mtk_hash_alg_release()