Lines Matching "sha-1"

12  * Some ideas are from atmel-sha.c and omap-sham.c drivers.
16 #include <crypto/sha.h>
17 #include "mtk-platform.h"
19 #define SHA_ALIGN_MSK (sizeof(u32) - 1)
23 #define SHA_OP_UPDATE 1
29 /* SHA command token */
36 /* SHA transform information */
49 /* SHA flags */
51 #define SHA_FLAGS_FINAL BIT(1)
64 * mtk_sha_info - hardware information of SHA
132 return readl_relaxed(cryp->base + offset); in mtk_sha_read()
138 writel_relaxed(value, cryp->base + offset); in mtk_sha_write()
146 *cmd_curr = ring->cmd_next++; in mtk_sha_ring_shift()
147 *res_curr = ring->res_next++; in mtk_sha_ring_shift()
150 if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) { in mtk_sha_ring_shift()
151 ring->cmd_next = ring->cmd_base; in mtk_sha_ring_shift()
152 ring->res_next = ring->res_base; in mtk_sha_ring_shift()
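mtk_sha_ring_shift() above hands back the current command/result descriptor slots and advances the ring's next pointers, wrapping them back to the base once MTK_DESC_NUM entries have been consumed. A self-contained sketch of that wrap-around pattern (hypothetical names and sizes, not the driver's structures):

/* Circular advance over a fixed-size descriptor array (illustrative). */
#define DESC_NUM 32

struct desc { unsigned int hdr; };

static struct desc ring[DESC_NUM];
static struct desc *next = ring;

static struct desc *ring_shift(void)
{
	struct desc *curr = next++;

	if (next == ring + DESC_NUM)	/* past the last slot: wrap */
		next = ring;
	return curr;
}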
162 if (!tctx->cryp) { in mtk_sha_find_dev()
167 tctx->cryp = cryp; in mtk_sha_find_dev()
169 cryp = tctx->cryp; in mtk_sha_find_dev()
173 * Assign record id to tfm in round-robin fashion, and this in mtk_sha_find_dev()
176 tctx->id = cryp->rec; in mtk_sha_find_dev()
177 cryp->rec = !cryp->rec; in mtk_sha_find_dev()
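The two assignments above spread transforms across the driver's two SHA records: each new tfm takes the current value of cryp->rec, which then toggles, so consecutive transforms land on different records and therefore different hardware rings. A minimal standalone illustration of the same toggle (hypothetical names, not driver code):

/* Toggle-based round-robin: consecutive callers receive 0, 1, 0, 1, ... */
static unsigned int rec;

static unsigned int assign_record(void)
{
	unsigned int id = rec;

	rec = !rec;		/* flip between 0 and 1 */
	return id;
}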
188 while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) { in mtk_sha_append_sg()
189 count = min(ctx->sg->length - ctx->offset, ctx->total); in mtk_sha_append_sg()
190 count = min(count, SHA_BUF_SIZE - ctx->bufcnt); in mtk_sha_append_sg()
199 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { in mtk_sha_append_sg()
200 ctx->sg = sg_next(ctx->sg); in mtk_sha_append_sg()
207 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, in mtk_sha_append_sg()
208 ctx->offset, count, 0); in mtk_sha_append_sg()
210 ctx->bufcnt += count; in mtk_sha_append_sg()
211 ctx->offset += count; in mtk_sha_append_sg()
212 ctx->total -= count; in mtk_sha_append_sg()
214 if (ctx->offset == ctx->sg->length) { in mtk_sha_append_sg()
215 ctx->sg = sg_next(ctx->sg); in mtk_sha_append_sg()
216 if (ctx->sg) in mtk_sha_append_sg()
217 ctx->offset = 0; in mtk_sha_append_sg()
219 ctx->total = 0; in mtk_sha_append_sg()
229 * The bit "1" is appended at the end of the message followed by
230 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
235 * - if message length < 56 bytes then padlen = 56 - message length
236 * - else padlen = 64 + 56 - message length
239 * - if message length < 112 bytes then padlen = 112 - message length
240 * - else padlen = 128 + 112 - message length
246 u64 size = ctx->digcnt; in mtk_sha_fill_padding()
248 size += ctx->bufcnt; in mtk_sha_fill_padding()
251 bits[1] = cpu_to_be64(size << 3); in mtk_sha_fill_padding()
254 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { in mtk_sha_fill_padding()
257 index = ctx->bufcnt & 0x7f; in mtk_sha_fill_padding()
258 padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); in mtk_sha_fill_padding()
259 *(ctx->buffer + ctx->bufcnt) = 0x80; in mtk_sha_fill_padding()
260 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); in mtk_sha_fill_padding()
261 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in mtk_sha_fill_padding()
262 ctx->bufcnt += padlen + 16; in mtk_sha_fill_padding()
263 ctx->flags |= SHA_FLAGS_PAD; in mtk_sha_fill_padding()
267 index = ctx->bufcnt & 0x3f; in mtk_sha_fill_padding()
268 padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); in mtk_sha_fill_padding()
269 *(ctx->buffer + ctx->bufcnt) = 0x80; in mtk_sha_fill_padding()
270 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); in mtk_sha_fill_padding()
271 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in mtk_sha_fill_padding()
272 ctx->bufcnt += padlen + 8; in mtk_sha_fill_padding()
273 ctx->flags |= SHA_FLAGS_PAD; in mtk_sha_fill_padding()
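A worked sketch of the padlen rules quoted above (illustrative userspace code, not part of the driver). Note that "message length" in those rules is taken modulo the block size, exactly as mtk_sha_fill_padding() does with its index variable:

/* Compute the zero-padding length for SHA message padding. */
#include <stdio.h>
#include <stddef.h>

static size_t sha_padlen(size_t len, int wide)	/* wide != 0: SHA-384/512 */
{
	size_t block    = wide ? 128 : 64;	/* hash block size in bytes  */
	size_t boundary = wide ? 112 : 56;	/* room left for the length  */
	size_t index    = len & (block - 1);	/* len modulo the block size */

	return (index < boundary) ? boundary - index
				  : block + boundary - index;
}

int main(void)
{
	/* 60 buffered bytes, SHA-256: padlen = 64 + 56 - 60 = 60, and
	 * 60 + 60 + 8 (length field) = 128, a multiple of the 64-byte block. */
	printf("padlen = %zu\n", sha_padlen(60, 0));
	return 0;
}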
278 /* Initialize basic transform information of SHA */
281 struct mtk_sha_info *info = &ctx->info; in mtk_sha_info_init()
283 ctx->ct_hdr = SHA_CT_CTRL_HDR; in mtk_sha_info_init()
284 ctx->ct_size = SHA_CT_SIZE; in mtk_sha_info_init()
286 info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds)); in mtk_sha_info_init()
288 switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { in mtk_sha_info_init()
290 info->tfm[0] |= SHA_TFM_SHA1; in mtk_sha_info_init()
293 info->tfm[0] |= SHA_TFM_SHA224; in mtk_sha_info_init()
296 info->tfm[0] |= SHA_TFM_SHA256; in mtk_sha_info_init()
299 info->tfm[0] |= SHA_TFM_SHA384; in mtk_sha_info_init()
302 info->tfm[0] |= SHA_TFM_SHA512; in mtk_sha_info_init()
310 info->tfm[1] = SHA_TFM_HASH_STORE; in mtk_sha_info_init()
311 info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START; in mtk_sha_info_init()
312 info->ctrl[1] = info->tfm[1]; in mtk_sha_info_init()
314 info->cmd[0] = SHA_CMD0; in mtk_sha_info_init()
315 info->cmd[1] = SHA_CMD1; in mtk_sha_info_init()
316 info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); in mtk_sha_info_init()
324 struct mtk_sha_rec *sha, in mtk_sha_info_update() argument
327 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_info_update()
328 struct mtk_sha_info *info = &ctx->info; in mtk_sha_info_update()
330 ctx->ct_hdr &= ~SHA_DATA_LEN_MSK; in mtk_sha_info_update()
331 ctx->ct_hdr |= cpu_to_le32(len1 + len2); in mtk_sha_info_update()
332 info->cmd[0] &= ~SHA_DATA_LEN_MSK; in mtk_sha_info_update()
333 info->cmd[0] |= cpu_to_le32(len1 + len2); in mtk_sha_info_update()
336 if (ctx->digcnt) in mtk_sha_info_update()
337 info->ctrl[0] &= ~SHA_TFM_START; in mtk_sha_info_update()
339 ctx->digcnt += len1; in mtk_sha_info_update()
341 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), in mtk_sha_info_update()
343 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) { in mtk_sha_info_update()
344 dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); in mtk_sha_info_update()
345 return -EINVAL; in mtk_sha_info_update()
348 ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd); in mtk_sha_info_update()
354 * Because of a hardware limitation, we must pre-calculate the inner
361 struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in mtk_sha_finish_hmac()
362 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_finish_hmac()
365 SHASH_DESC_ON_STACK(shash, bctx->shash); in mtk_sha_finish_hmac()
367 shash->tfm = bctx->shash; in mtk_sha_finish_hmac()
368 shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ in mtk_sha_finish_hmac()
371 crypto_shash_update(shash, bctx->opad, ctx->bs) ?: in mtk_sha_finish_hmac()
372 crypto_shash_finup(shash, req->result, ctx->ds, req->result); in mtk_sha_finish_hmac()
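For reference, the standard HMAC construction (a well-known identity, not quoted from this file) is HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)). The engine computes the inner hash because mtk_sha_init() preloads the precomputed ipad block into the request buffer, so by the time mtk_sha_finish_hmac() runs, req->result already holds H((K xor ipad) || m); the shash calls above then finish the outer hash in software: init, update over the bs-byte opad block, and finup over the ds-byte inner digest.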
382 ctx->flags = 0; in mtk_sha_init()
383 ctx->ds = crypto_ahash_digestsize(tfm); in mtk_sha_init()
385 switch (ctx->ds) { in mtk_sha_init()
387 ctx->flags |= SHA_FLAGS_SHA1; in mtk_sha_init()
388 ctx->bs = SHA1_BLOCK_SIZE; in mtk_sha_init()
391 ctx->flags |= SHA_FLAGS_SHA224; in mtk_sha_init()
392 ctx->bs = SHA224_BLOCK_SIZE; in mtk_sha_init()
395 ctx->flags |= SHA_FLAGS_SHA256; in mtk_sha_init()
396 ctx->bs = SHA256_BLOCK_SIZE; in mtk_sha_init()
399 ctx->flags |= SHA_FLAGS_SHA384; in mtk_sha_init()
400 ctx->bs = SHA384_BLOCK_SIZE; in mtk_sha_init()
403 ctx->flags |= SHA_FLAGS_SHA512; in mtk_sha_init()
404 ctx->bs = SHA512_BLOCK_SIZE; in mtk_sha_init()
407 return -EINVAL; in mtk_sha_init()
410 ctx->bufcnt = 0; in mtk_sha_init()
411 ctx->digcnt = 0; in mtk_sha_init()
412 ctx->buffer = tctx->buf; in mtk_sha_init()
414 if (tctx->flags & SHA_FLAGS_HMAC) { in mtk_sha_init()
415 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_init()
417 memcpy(ctx->buffer, bctx->ipad, ctx->bs); in mtk_sha_init()
418 ctx->bufcnt = ctx->bs; in mtk_sha_init()
419 ctx->flags |= SHA_FLAGS_HMAC; in mtk_sha_init()
425 static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha, in mtk_sha_xmit() argument
429 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_xmit()
430 struct mtk_ring *ring = cryp->ring[sha->id]; in mtk_sha_xmit()
434 err = mtk_sha_info_update(cryp, sha, len1, len2); in mtk_sha_xmit()
441 res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1); in mtk_sha_xmit()
442 cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) | in mtk_sha_xmit()
443 MTK_DESC_CT_LEN(ctx->ct_size); in mtk_sha_xmit()
444 cmd->buf = cpu_to_le32(addr1); in mtk_sha_xmit()
445 cmd->ct = cpu_to_le32(ctx->ct_dma); in mtk_sha_xmit()
446 cmd->ct_hdr = ctx->ct_hdr; in mtk_sha_xmit()
447 cmd->tfm = cpu_to_le32(ctx->tfm_dma); in mtk_sha_xmit()
452 res->hdr = MTK_DESC_BUF_LEN(len2); in mtk_sha_xmit()
453 cmd->hdr = MTK_DESC_BUF_LEN(len2); in mtk_sha_xmit()
454 cmd->buf = cpu_to_le32(addr2); in mtk_sha_xmit()
457 cmd->hdr |= MTK_DESC_LAST; in mtk_sha_xmit()
458 res->hdr |= MTK_DESC_LAST; in mtk_sha_xmit()
466 mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); in mtk_sha_xmit()
467 mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); in mtk_sha_xmit()
469 return -EINPROGRESS; in mtk_sha_xmit()
473 struct mtk_sha_rec *sha, in mtk_sha_dma_map() argument
477 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, in mtk_sha_dma_map()
479 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { in mtk_sha_dma_map()
480 dev_err(cryp->dev, "dma map error\n"); in mtk_sha_dma_map()
481 return -EINVAL; in mtk_sha_dma_map()
484 ctx->flags &= ~SHA_FLAGS_SG; in mtk_sha_dma_map()
486 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0); in mtk_sha_dma_map()
490 struct mtk_sha_rec *sha) in mtk_sha_update_slow() argument
492 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_update_slow()
498 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in mtk_sha_update_slow()
500 dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt); in mtk_sha_update_slow()
503 sha->flags |= SHA_FLAGS_FINAL; in mtk_sha_update_slow()
507 if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) { in mtk_sha_update_slow()
508 count = ctx->bufcnt; in mtk_sha_update_slow()
509 ctx->bufcnt = 0; in mtk_sha_update_slow()
511 return mtk_sha_dma_map(cryp, sha, ctx, count); in mtk_sha_update_slow()
517 struct mtk_sha_rec *sha) in mtk_sha_update_start() argument
519 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_update_start()
523 if (!ctx->total) in mtk_sha_update_start()
526 if (ctx->bufcnt || ctx->offset) in mtk_sha_update_start()
527 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
529 sg = ctx->sg; in mtk_sha_update_start()
531 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in mtk_sha_update_start()
532 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
534 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs)) in mtk_sha_update_start()
535 /* size is not ctx->bs aligned */ in mtk_sha_update_start()
536 return mtk_sha_update_slow(cryp, sha); in mtk_sha_update_start()
538 len = min(ctx->total, sg->length); in mtk_sha_update_start()
541 if (!(ctx->flags & SHA_FLAGS_FINUP)) { in mtk_sha_update_start()
542 /* not last sg must be ctx->bs aligned */ in mtk_sha_update_start()
543 tail = len & (ctx->bs - 1); in mtk_sha_update_start()
544 len -= tail; in mtk_sha_update_start()
548 ctx->total -= len; in mtk_sha_update_start()
549 ctx->offset = len; /* offset where to start slow */ in mtk_sha_update_start()
551 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in mtk_sha_update_start()
557 tail = len & (ctx->bs - 1); in mtk_sha_update_start()
558 len -= tail; in mtk_sha_update_start()
559 ctx->total += tail; in mtk_sha_update_start()
560 ctx->offset = len; /* offset where to start slow */ in mtk_sha_update_start()
562 sg = ctx->sg; in mtk_sha_update_start()
566 ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, in mtk_sha_update_start()
568 if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { in mtk_sha_update_start()
569 dev_err(cryp->dev, "dma map bytes error\n"); in mtk_sha_update_start()
570 return -EINVAL; in mtk_sha_update_start()
573 sha->flags |= SHA_FLAGS_FINAL; in mtk_sha_update_start()
574 count = ctx->bufcnt; in mtk_sha_update_start()
575 ctx->bufcnt = 0; in mtk_sha_update_start()
578 ctx->flags &= ~SHA_FLAGS_SG; in mtk_sha_update_start()
579 return mtk_sha_xmit(cryp, sha, ctx->dma_addr, in mtk_sha_update_start()
583 ctx->sg = sg; in mtk_sha_update_start()
584 if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in mtk_sha_update_start()
585 dev_err(cryp->dev, "dma_map_sg error\n"); in mtk_sha_update_start()
586 return -EINVAL; in mtk_sha_update_start()
589 ctx->flags |= SHA_FLAGS_SG; in mtk_sha_update_start()
590 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), in mtk_sha_update_start()
591 len, ctx->dma_addr, count); in mtk_sha_update_start()
595 if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in mtk_sha_update_start()
596 dev_err(cryp->dev, "dma_map_sg error\n"); in mtk_sha_update_start()
597 return -EINVAL; in mtk_sha_update_start()
600 ctx->flags |= SHA_FLAGS_SG; in mtk_sha_update_start()
602 return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), in mtk_sha_update_start()
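The checks above decide between the two data paths in mtk_sha_update_start(): any request that still has buffered or partially consumed data, starts at a non-word-aligned offset, or has a non-final sg entry whose length is not a multiple of the block size is routed to the bounce-buffer slow path; everything else is DMA-mapped directly. A condensed, self-contained restatement of that decision (illustrative only, not driver code):

/* Illustrative fast/slow path choice, condensed from the checks above. */
#include <stdbool.h>
#include <stddef.h>

#define ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

static bool use_slow_path(size_t bufcnt, size_t offset,
			  size_t sg_offset, size_t sg_length,
			  bool sg_is_last, size_t bs)
{
	if (bufcnt || offset)
		return true;		/* leftover data already buffered */
	if (!ALIGNED(sg_offset, sizeof(unsigned int)))
		return true;		/* start address not word aligned */
	if (!sg_is_last && !ALIGNED(sg_length, bs))
		return true;		/* length not block-size aligned  */
	return false;			/* fast path: DMA the sg directly */
}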
607 struct mtk_sha_rec *sha) in mtk_sha_final_req() argument
609 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_final_req()
614 sha->flags |= SHA_FLAGS_FINAL; in mtk_sha_final_req()
615 count = ctx->bufcnt; in mtk_sha_final_req()
616 ctx->bufcnt = 0; in mtk_sha_final_req()
618 return mtk_sha_dma_map(cryp, sha, ctx, count); in mtk_sha_final_req()
625 __le32 *digest = ctx->info.digest; in mtk_sha_finish()
626 u32 *result = (u32 *)req->result; in mtk_sha_finish()
630 for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++) in mtk_sha_finish()
633 if (ctx->flags & SHA_FLAGS_HMAC) in mtk_sha_finish()
640 struct mtk_sha_rec *sha, in mtk_sha_finish_req() argument
643 if (likely(!err && (SHA_FLAGS_FINAL & sha->flags))) in mtk_sha_finish_req()
644 err = mtk_sha_finish(sha->req); in mtk_sha_finish_req()
646 sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL); in mtk_sha_finish_req()
648 sha->req->base.complete(&sha->req->base, err); in mtk_sha_finish_req()
651 tasklet_schedule(&sha->queue_task); in mtk_sha_finish_req()
657 struct mtk_sha_rec *sha = cryp->sha[id]; in mtk_sha_handle_queue() local
663 spin_lock_irqsave(&sha->lock, flags); in mtk_sha_handle_queue()
665 ret = ahash_enqueue_request(&sha->queue, req); in mtk_sha_handle_queue()
667 if (SHA_FLAGS_BUSY & sha->flags) { in mtk_sha_handle_queue()
668 spin_unlock_irqrestore(&sha->lock, flags); in mtk_sha_handle_queue()
672 backlog = crypto_get_backlog(&sha->queue); in mtk_sha_handle_queue()
673 async_req = crypto_dequeue_request(&sha->queue); in mtk_sha_handle_queue()
675 sha->flags |= SHA_FLAGS_BUSY; in mtk_sha_handle_queue()
676 spin_unlock_irqrestore(&sha->lock, flags); in mtk_sha_handle_queue()
682 backlog->complete(backlog, -EINPROGRESS); in mtk_sha_handle_queue()
687 sha->req = req; in mtk_sha_handle_queue()
691 if (ctx->op == SHA_OP_UPDATE) { in mtk_sha_handle_queue()
692 err = mtk_sha_update_start(cryp, sha); in mtk_sha_handle_queue()
693 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) in mtk_sha_handle_queue()
695 err = mtk_sha_final_req(cryp, sha); in mtk_sha_handle_queue()
696 } else if (ctx->op == SHA_OP_FINAL) { in mtk_sha_handle_queue()
697 err = mtk_sha_final_req(cryp, sha); in mtk_sha_handle_queue()
700 if (unlikely(err != -EINPROGRESS)) in mtk_sha_handle_queue()
702 mtk_sha_finish_req(cryp, sha, err); in mtk_sha_handle_queue()
710 struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in mtk_sha_enqueue()
712 ctx->op = op; in mtk_sha_enqueue()
714 return mtk_sha_handle_queue(tctx->cryp, tctx->id, req); in mtk_sha_enqueue()
717 static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha) in mtk_sha_unmap() argument
719 struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); in mtk_sha_unmap()
721 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), in mtk_sha_unmap()
724 if (ctx->flags & SHA_FLAGS_SG) { in mtk_sha_unmap()
725 dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE); in mtk_sha_unmap()
726 if (ctx->sg->length == ctx->offset) { in mtk_sha_unmap()
727 ctx->sg = sg_next(ctx->sg); in mtk_sha_unmap()
728 if (ctx->sg) in mtk_sha_unmap()
729 ctx->offset = 0; in mtk_sha_unmap()
731 if (ctx->flags & SHA_FLAGS_PAD) { in mtk_sha_unmap()
732 dma_unmap_single(cryp->dev, ctx->dma_addr, in mtk_sha_unmap()
736 dma_unmap_single(cryp->dev, ctx->dma_addr, in mtk_sha_unmap()
741 struct mtk_sha_rec *sha) in mtk_sha_complete() argument
745 err = mtk_sha_update_start(cryp, sha); in mtk_sha_complete()
746 if (err != -EINPROGRESS) in mtk_sha_complete()
747 mtk_sha_finish_req(cryp, sha, err); in mtk_sha_complete()
754 ctx->total = req->nbytes; in mtk_sha_update()
755 ctx->sg = req->src; in mtk_sha_update()
756 ctx->offset = 0; in mtk_sha_update()
758 if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) && in mtk_sha_update()
759 !(ctx->flags & SHA_FLAGS_FINUP)) in mtk_sha_update()
769 ctx->flags |= SHA_FLAGS_FINUP; in mtk_sha_final()
771 if (ctx->flags & SHA_FLAGS_PAD) in mtk_sha_final()
782 ctx->flags |= SHA_FLAGS_FINUP; in mtk_sha_finup()
785 if (err1 == -EINPROGRESS || err1 == -EBUSY) in mtk_sha_finup()
805 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_setkey()
806 size_t bs = crypto_shash_blocksize(bctx->shash); in mtk_sha_setkey()
807 size_t ds = crypto_shash_digestsize(bctx->shash); in mtk_sha_setkey()
810 SHASH_DESC_ON_STACK(shash, bctx->shash); in mtk_sha_setkey()
812 shash->tfm = bctx->shash; in mtk_sha_setkey()
813 shash->flags = crypto_shash_get_flags(bctx->shash) & in mtk_sha_setkey()
817 err = crypto_shash_digest(shash, key, keylen, bctx->ipad); in mtk_sha_setkey()
822 memcpy(bctx->ipad, key, keylen); in mtk_sha_setkey()
825 memset(bctx->ipad + keylen, 0, bs - keylen); in mtk_sha_setkey()
826 memcpy(bctx->opad, bctx->ipad, bs); in mtk_sha_setkey()
829 bctx->ipad[i] ^= HMAC_IPAD_VALUE; in mtk_sha_setkey()
830 bctx->opad[i] ^= HMAC_OPAD_VALUE; in mtk_sha_setkey()
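The fragments above are the standard RFC 2104 key preparation: a key longer than the block size is first hashed down to the digest size, the (possibly shortened) key is zero-padded to bs bytes, and the inner/outer pads are derived by XOR-ing with the 0x36/0x5c constants from include/crypto/hmac.h. A self-contained sketch of the padding and XOR step (illustrative names, assumes keylen <= bs):

/* Illustrative HMAC pad derivation (RFC 2104). */
#include <string.h>
#include <stddef.h>

#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c

static void hmac_prepare_pads(const unsigned char *key, size_t keylen,
			      unsigned char *ipad, unsigned char *opad,
			      size_t bs)
{
	size_t i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, bs - keylen);
	memcpy(opad, ipad, bs);

	for (i = 0; i < bs; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}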
860 return -ENODEV; in mtk_sha_cra_init_alg()
866 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_cra_init_alg()
868 tctx->flags |= SHA_FLAGS_HMAC; in mtk_sha_cra_init_alg()
869 bctx->shash = crypto_alloc_shash(alg_base, 0, in mtk_sha_cra_init_alg()
871 if (IS_ERR(bctx->shash)) { in mtk_sha_cra_init_alg()
875 return PTR_ERR(bctx->shash); in mtk_sha_cra_init_alg()
915 if (tctx->flags & SHA_FLAGS_HMAC) { in mtk_sha_cra_exit()
916 struct mtk_sha_hmac_ctx *bctx = tctx->base; in mtk_sha_cra_exit()
918 crypto_free_shash(bctx->shash); in mtk_sha_cra_exit()
935 .cra_driver_name = "mtk-sha1",
958 .cra_driver_name = "mtk-sha224",
981 .cra_driver_name = "mtk-sha256",
1005 .cra_driver_name = "mtk-hmac-sha1",
1031 .cra_driver_name = "mtk-hmac-sha224",
1057 .cra_driver_name = "mtk-hmac-sha256",
1085 .cra_driver_name = "mtk-sha384",
1108 .cra_driver_name = "mtk-sha512",
1132 .cra_driver_name = "mtk-hmac-sha384",
1158 .cra_driver_name = "mtk-hmac-sha512",
1175 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; in mtk_sha_queue_task() local
1177 mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL); in mtk_sha_queue_task()
1182 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; in mtk_sha_done_task() local
1183 struct mtk_cryp *cryp = sha->cryp; in mtk_sha_done_task()
1185 mtk_sha_unmap(cryp, sha); in mtk_sha_done_task()
1186 mtk_sha_complete(cryp, sha); in mtk_sha_done_task()
1191 struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id; in mtk_sha_irq() local
1192 struct mtk_cryp *cryp = sha->cryp; in mtk_sha_irq()
1193 u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id)); in mtk_sha_irq()
1195 mtk_sha_write(cryp, RDR_STAT(sha->id), val); in mtk_sha_irq()
1197 if (likely((SHA_FLAGS_BUSY & sha->flags))) { in mtk_sha_irq()
1198 mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST); in mtk_sha_irq()
1199 mtk_sha_write(cryp, RDR_THRESH(sha->id), in mtk_sha_irq()
1202 tasklet_schedule(&sha->done_task); in mtk_sha_irq()
1204 dev_warn(cryp->dev, "SHA interrupt when no active requests.\n"); in mtk_sha_irq()
1210 * Two SHA records are used to get extra performance.
1215 struct mtk_sha_rec **sha = cryp->sha; in mtk_sha_record_init() local
1216 int i, err = -ENOMEM; in mtk_sha_record_init()
1219 sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL); in mtk_sha_record_init()
1220 if (!sha[i]) in mtk_sha_record_init()
1223 sha[i]->cryp = cryp; in mtk_sha_record_init()
1225 spin_lock_init(&sha[i]->lock); in mtk_sha_record_init()
1226 crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE); in mtk_sha_record_init()
1228 tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task, in mtk_sha_record_init()
1229 (unsigned long)sha[i]); in mtk_sha_record_init()
1230 tasklet_init(&sha[i]->done_task, mtk_sha_done_task, in mtk_sha_record_init()
1231 (unsigned long)sha[i]); in mtk_sha_record_init()
1235 sha[0]->id = MTK_RING2; in mtk_sha_record_init()
1236 sha[1]->id = MTK_RING3; in mtk_sha_record_init()
1238 cryp->rec = 1; in mtk_sha_record_init()
1243 for (; i--; ) in mtk_sha_record_init()
1244 kfree(sha[i]); in mtk_sha_record_init()
1253 tasklet_kill(&cryp->sha[i]->done_task); in mtk_sha_record_free()
1254 tasklet_kill(&cryp->sha[i]->queue_task); in mtk_sha_record_free()
1256 kfree(cryp->sha[i]); in mtk_sha_record_free()
1290 for (; i--; ) in mtk_sha_register_algs()
1294 for (; i--; ) in mtk_sha_register_algs()
1304 INIT_LIST_HEAD(&cryp->sha_list); in mtk_hash_alg_register()
1311 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq, in mtk_hash_alg_register()
1312 0, "mtk-sha", cryp->sha[0]); in mtk_hash_alg_register()
1314 dev_err(cryp->dev, "unable to request sha irq0.\n"); in mtk_hash_alg_register()
1318 err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq, in mtk_hash_alg_register()
1319 0, "mtk-sha", cryp->sha[1]); in mtk_hash_alg_register()
1321 dev_err(cryp->dev, "unable to request sha irq1.\n"); in mtk_hash_alg_register()
1330 list_add_tail(&cryp->sha_list, &mtk_sha.dev_list); in mtk_hash_alg_register()
1341 list_del(&cryp->sha_list); in mtk_hash_alg_register()
1347 dev_err(cryp->dev, "mtk-sha initialization failed.\n"); in mtk_hash_alg_register()
1354 list_del(&cryp->sha_list); in mtk_hash_alg_release()