
Lines Matching +full:aes +full:- +full:gcm

4  * Driver for EIP97 AES acceleration.
12 * Some ideas are from the atmel-aes.c driver.
15 #include <crypto/aes.h>
16 #include <crypto/gcm.h>
17 #include "mtk-platform.h"
22 & ~(AES_BLOCK_SIZE - 1))
29 /* AES-CBC/ECB/CTR command token */
33 /* AES-GCM command token */
42 /* AES transform information word 0 fields */
53 /* AES transform information word 1 fields */
58 #define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */
59 #define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */
63 /* AES flags */
75 * mtk_aes_info - hardware information of AES
80 * Memory layout of GCM buffer:
81 * /-----------\
82 * | AES KEY | 128/192/256 bits
83 * |-----------|
85 * |-----------|
87 * \-----------/
90 * - Command decoding and control of the engine's data path.
91 * - Coordinating hardware data fetch and store operations.
92 * - Result token construction and output.
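
The three duties listed above are served by the cmd, tfm, and state regions of struct mtk_aes_info. A hypothetical C sketch of the GCM state layout the diagram describes; field names and array sizes are illustrative, inferred from the offsets used in mtk_aes_gcm_setkey() and mtk_aes_gcm_info_init(), not taken from the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only: key words first, then the GHASH key H (one AES
     * block), then the IV/counter words, matching the order in which the
     * setkey and info-init paths fill the state buffer. */
    struct gcm_state_sketch {
            uint32_t key[8];    /* AES key: 4, 6, or 8 words for 128/192/256 bits */
            uint32_t hash[4];   /* GHASH key H = E_K(0^128), one 16-byte block */
            uint32_t iv[4];     /* up to four 32-bit IV words */
    };

    int main(void)
    {
            printf("state bytes: %zu\n", sizeof(struct gcm_state_sketch)); /* 64 */
            return 0;
    }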
154 return readl_relaxed(cryp->base + offset); in mtk_aes_read()
160 writel_relaxed(value, cryp->base + offset); in mtk_aes_write()
169 if (!ctx->cryp) { in mtk_aes_find_dev()
174 ctx->cryp = cryp; in mtk_aes_find_dev()
176 cryp = ctx->cryp; in mtk_aes_find_dev()
185 len &= AES_BLOCK_SIZE - 1; in mtk_aes_padlen()
186 return len ? AES_BLOCK_SIZE - len : 0; in mtk_aes_padlen()
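
The padding helper rounds a request length up to the 16-byte AES block size with a power-of-two mask. A minimal standalone sketch of the same computation (AES_BLOCK_SIZE redefined here for illustration):

    #include <stdio.h>
    #include <stddef.h>

    #define AES_BLOCK_SIZE 16           /* AES always uses 16-byte blocks */

    /* Zero padding needed to round len up to the next block boundary. */
    static size_t padlen(size_t len)
    {
            len &= AES_BLOCK_SIZE - 1;  /* len mod 16; valid since 16 is a power of two */
            return len ? AES_BLOCK_SIZE - len : 0;
    }

    int main(void)
    {
            /* 32 is already aligned -> 0; 33 needs 15 more bytes; 47 needs 1. */
            printf("%zu %zu %zu\n", padlen(32), padlen(33), padlen(47));
            return 0;
    }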
198 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in mtk_aes_check_aligned()
201 if (len <= sg->length) { in mtk_aes_check_aligned()
205 dma->nents = nents + 1; in mtk_aes_check_aligned()
206 dma->remainder = sg->length - len; in mtk_aes_check_aligned()
207 sg->length = len; in mtk_aes_check_aligned()
211 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) in mtk_aes_check_aligned()
214 len -= sg->length; in mtk_aes_check_aligned()
220 static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes, in mtk_aes_set_mode() argument
224 aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode; in mtk_aes_set_mode()
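
The merge keeps only the record's BUSY bit and takes every mode bit from the new request, so stale cipher-mode flags never leak between requests. A toy demonstration with illustrative bit positions (the driver's actual flag values differ):

    #include <stdio.h>

    #define AES_FLAGS_BUSY          (1U << 0)   /* illustrative bits only */
    #define AES_FLAGS_ENCRYPT       (1U << 1)
    #define AES_FLAGS_GCM           (1U << 2)

    int main(void)
    {
            unsigned int flags = AES_FLAGS_BUSY | AES_FLAGS_GCM; /* left by a prior request */
            unsigned int mode  = AES_FLAGS_ENCRYPT;              /* new request's rctx->mode */

            /* Same merge as mtk_aes_set_mode(): keep BUSY, replace mode bits. */
            flags = (flags & AES_FLAGS_BUSY) | mode;
            printf("0x%x\n", flags);    /* 0x3 = BUSY | ENCRYPT; the GCM bit is gone */
            return 0;
    }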
229 struct scatterlist *sg = dma->sg; in mtk_aes_restore_sg()
230 int nents = dma->nents; in mtk_aes_restore_sg()
232 if (!dma->remainder) in mtk_aes_restore_sg()
235 while (--nents > 0 && sg) in mtk_aes_restore_sg()
241 sg->length += dma->remainder; in mtk_aes_restore_sg()
261 struct mtk_aes_rec *aes, in mtk_aes_complete() argument
264 aes->flags &= ~AES_FLAGS_BUSY; in mtk_aes_complete()
265 aes->areq->complete(aes->areq, err); in mtk_aes_complete()
267 tasklet_schedule(&aes->queue_task); in mtk_aes_complete()
275 static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_xmit() argument
277 struct mtk_ring *ring = cryp->ring[aes->id]; in mtk_aes_xmit()
279 struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg; in mtk_aes_xmit()
280 u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len; in mtk_aes_xmit()
285 cmd = ring->cmd_next; in mtk_aes_xmit()
286 cmd->hdr = MTK_DESC_BUF_LEN(ssg->length); in mtk_aes_xmit()
287 cmd->buf = cpu_to_le32(sg_dma_address(ssg)); in mtk_aes_xmit()
290 cmd->hdr |= MTK_DESC_FIRST | in mtk_aes_xmit()
291 MTK_DESC_CT_LEN(aes->ctx->ct_size); in mtk_aes_xmit()
292 cmd->ct = cpu_to_le32(aes->ctx->ct_dma); in mtk_aes_xmit()
293 cmd->ct_hdr = aes->ctx->ct_hdr; in mtk_aes_xmit()
294 cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma); in mtk_aes_xmit()
298 if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) in mtk_aes_xmit()
299 ring->cmd_next = ring->cmd_base; in mtk_aes_xmit()
301 cmd->hdr |= MTK_DESC_LAST; in mtk_aes_xmit()
305 res = ring->res_next; in mtk_aes_xmit()
306 res->hdr = MTK_DESC_BUF_LEN(dsg->length); in mtk_aes_xmit()
307 res->buf = cpu_to_le32(sg_dma_address(dsg)); in mtk_aes_xmit()
310 res->hdr |= MTK_DESC_FIRST; in mtk_aes_xmit()
313 if (++ring->res_next == ring->res_base + MTK_DESC_NUM) in mtk_aes_xmit()
314 ring->res_next = ring->res_base; in mtk_aes_xmit()
316 res->hdr |= MTK_DESC_LAST; in mtk_aes_xmit()
319 ring->res_prev = res; in mtk_aes_xmit()
322 if (aes->flags & AES_FLAGS_GCM) in mtk_aes_xmit()
323 res->hdr += AES_BLOCK_SIZE; in mtk_aes_xmit()
331 mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen)); in mtk_aes_xmit()
332 mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen)); in mtk_aes_xmit()
334 return -EINPROGRESS; in mtk_aes_xmit()
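
Both the command and result pointers above advance through fixed-size descriptor arrays and wrap back to the base, the classic circular-ring idiom. A standalone sketch of that pattern (MTK_DESC_NUM shrunk to 4 for the demo):

    #include <stdio.h>

    #define MTK_DESC_NUM 4      /* shrunk for the demo; the driver's ring is larger */

    struct desc { unsigned int hdr; };

    int main(void)
    {
            struct desc ring[MTK_DESC_NUM];
            struct desc *base = ring, *next = ring;
            int i;

            /* Claim six slots to show the pointer wrapping past the end,
             * exactly the cmd_next/res_next idiom above. */
            for (i = 0; i < 6; i++) {
                    printf("slot %ld\n", (long)(next - base));
                    if (++next == base + MTK_DESC_NUM)
                            next = base;
            }
            return 0;       /* prints slots 0 1 2 3 0 1 */
    }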
337 static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_unmap() argument
339 struct mtk_aes_base_ctx *ctx = aes->ctx; in mtk_aes_unmap()
341 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), in mtk_aes_unmap()
344 if (aes->src.sg == aes->dst.sg) { in mtk_aes_unmap()
345 dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, in mtk_aes_unmap()
348 if (aes->src.sg != &aes->aligned_sg) in mtk_aes_unmap()
349 mtk_aes_restore_sg(&aes->src); in mtk_aes_unmap()
351 dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents, in mtk_aes_unmap()
354 if (aes->dst.sg != &aes->aligned_sg) in mtk_aes_unmap()
355 mtk_aes_restore_sg(&aes->dst); in mtk_aes_unmap()
357 dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, in mtk_aes_unmap()
360 if (aes->src.sg != &aes->aligned_sg) in mtk_aes_unmap()
361 mtk_aes_restore_sg(&aes->src); in mtk_aes_unmap()
364 if (aes->dst.sg == &aes->aligned_sg) in mtk_aes_unmap()
365 sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst), in mtk_aes_unmap()
366 aes->buf, aes->total); in mtk_aes_unmap()
369 static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_map() argument
371 struct mtk_aes_base_ctx *ctx = aes->ctx; in mtk_aes_map()
372 struct mtk_aes_info *info = &ctx->info; in mtk_aes_map()
374 ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), in mtk_aes_map()
376 if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) in mtk_aes_map()
379 ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd); in mtk_aes_map()
381 if (aes->src.sg == aes->dst.sg) { in mtk_aes_map()
382 aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, in mtk_aes_map()
383 aes->src.nents, in mtk_aes_map()
385 aes->dst.sg_len = aes->src.sg_len; in mtk_aes_map()
386 if (unlikely(!aes->src.sg_len)) in mtk_aes_map()
389 aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, in mtk_aes_map()
390 aes->src.nents, DMA_TO_DEVICE); in mtk_aes_map()
391 if (unlikely(!aes->src.sg_len)) in mtk_aes_map()
394 aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg, in mtk_aes_map()
395 aes->dst.nents, DMA_FROM_DEVICE); in mtk_aes_map()
396 if (unlikely(!aes->dst.sg_len)) { in mtk_aes_map()
397 dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, in mtk_aes_map()
403 return mtk_aes_xmit(cryp, aes); in mtk_aes_map()
406 dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE); in mtk_aes_map()
408 return mtk_aes_complete(cryp, aes, -EINVAL); in mtk_aes_map()
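
mtk_aes_map() follows the kernel's goto-unwind convention: each failure path releases exactly what was acquired before it (the source mapping first, then the info mapping). A self-contained userspace sketch of the same pattern, with malloc/free standing in for the DMA map and unmap calls; label names are illustrative:

    #include <stdlib.h>

    static int map_all(void)
    {
            void *info = malloc(64), *src = NULL, *dst = NULL;

            if (!info)
                    return -1;
            src = malloc(64);
            if (!src)
                    goto unmap_info;    /* only the info mapping to undo */
            dst = malloc(64);
            if (!dst)
                    goto unmap_src;     /* undo src first, then info */

            /* ...submit to hardware here; on success all stays mapped... */
            free(dst);
            free(src);
            free(info);
            return 0;

    unmap_src:
            free(src);
    unmap_info:
            free(info);
            return -1;
    }

    int main(void)
    {
            return map_all() ? 1 : 0;
    }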
412 static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, in mtk_aes_info_init() argument
415 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); in mtk_aes_info_init()
416 struct mtk_aes_base_ctx *ctx = aes->ctx; in mtk_aes_info_init()
417 struct mtk_aes_info *info = &ctx->info; in mtk_aes_info_init()
420 ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); in mtk_aes_info_init()
421 info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len); in mtk_aes_info_init()
422 info->cmd[cnt++] = AES_CMD1; in mtk_aes_info_init()
424 info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode; in mtk_aes_info_init()
425 if (aes->flags & AES_FLAGS_ENCRYPT) in mtk_aes_info_init()
426 info->tfm[0] |= AES_TFM_BASIC_OUT; in mtk_aes_info_init()
428 info->tfm[0] |= AES_TFM_BASIC_IN; in mtk_aes_info_init()
430 switch (aes->flags & AES_FLAGS_CIPHER_MSK) { in mtk_aes_info_init()
432 info->tfm[1] = AES_TFM_CBC; in mtk_aes_info_init()
435 info->tfm[1] = AES_TFM_ECB; in mtk_aes_info_init()
438 info->tfm[1] = AES_TFM_CTR_LOAD; in mtk_aes_info_init()
446 mtk_aes_write_state_le(info->state + ctx->keylen, req->info, in mtk_aes_info_init()
449 info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE)); in mtk_aes_info_init()
450 info->tfm[1] |= AES_TFM_FULL_IV; in mtk_aes_info_init()
451 info->cmd[cnt++] = AES_CMD2; in mtk_aes_info_init()
453 ctx->ct_size = cnt; in mtk_aes_info_init()
456 static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, in mtk_aes_dma() argument
463 aes->total = len; in mtk_aes_dma()
464 aes->src.sg = src; in mtk_aes_dma()
465 aes->dst.sg = dst; in mtk_aes_dma()
466 aes->real_dst = dst; in mtk_aes_dma()
468 src_aligned = mtk_aes_check_aligned(src, len, &aes->src); in mtk_aes_dma()
472 dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst); in mtk_aes_dma()
478 return mtk_aes_complete(cryp, aes, -ENOMEM); in mtk_aes_dma()
481 sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); in mtk_aes_dma()
482 aes->src.sg = &aes->aligned_sg; in mtk_aes_dma()
483 aes->src.nents = 1; in mtk_aes_dma()
484 aes->src.remainder = 0; in mtk_aes_dma()
488 aes->dst.sg = &aes->aligned_sg; in mtk_aes_dma()
489 aes->dst.nents = 1; in mtk_aes_dma()
490 aes->dst.remainder = 0; in mtk_aes_dma()
493 sg_init_table(&aes->aligned_sg, 1); in mtk_aes_dma()
494 sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen); in mtk_aes_dma()
497 mtk_aes_info_init(cryp, aes, len + padlen); in mtk_aes_dma()
499 return mtk_aes_map(cryp, aes); in mtk_aes_dma()
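
A scatterlist is eligible for direct DMA only when every segment offset is 32-bit aligned and every length is a multiple of the AES block size (the tests in mtk_aes_check_aligned()); anything else is copied through the driver's bounce buffer aes->buf, provided the padded total fits, else the request fails with -ENOMEM. A small sketch of that decision, with illustrative constants:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define AES_BLOCK_SIZE  16
    #define AES_BUF_SIZE    4096    /* illustrative stand-in for the driver's buffer size */

    /* Direct DMA is allowed only for u32-aligned, block-sized segments. */
    static int needs_bounce(uintptr_t offset, size_t len)
    {
            return (offset & (sizeof(uint32_t) - 1)) ||
                   (len & (AES_BLOCK_SIZE - 1));
    }

    int main(void)
    {
            /* aligned offset + whole blocks: direct; odd offset: bounce */
            printf("%d %d\n", needs_bounce(0x1000, 32), needs_bounce(0x1001, 32));
            return 0;
    }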
505 struct mtk_aes_rec *aes = cryp->aes[id]; in mtk_aes_handle_queue() local
511 spin_lock_irqsave(&aes->lock, flags); in mtk_aes_handle_queue()
513 ret = crypto_enqueue_request(&aes->queue, new_areq); in mtk_aes_handle_queue()
514 if (aes->flags & AES_FLAGS_BUSY) { in mtk_aes_handle_queue()
515 spin_unlock_irqrestore(&aes->lock, flags); in mtk_aes_handle_queue()
518 backlog = crypto_get_backlog(&aes->queue); in mtk_aes_handle_queue()
519 areq = crypto_dequeue_request(&aes->queue); in mtk_aes_handle_queue()
521 aes->flags |= AES_FLAGS_BUSY; in mtk_aes_handle_queue()
522 spin_unlock_irqrestore(&aes->lock, flags); in mtk_aes_handle_queue()
528 backlog->complete(backlog, -EINPROGRESS); in mtk_aes_handle_queue()
530 ctx = crypto_tfm_ctx(areq->tfm); in mtk_aes_handle_queue()
532 aes->areq = areq; in mtk_aes_handle_queue()
533 aes->ctx = ctx; in mtk_aes_handle_queue()
535 return ctx->start(cryp, aes); in mtk_aes_handle_queue()
539 struct mtk_aes_rec *aes) in mtk_aes_transfer_complete() argument
541 return mtk_aes_complete(cryp, aes, 0); in mtk_aes_transfer_complete()
544 static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_start() argument
546 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); in mtk_aes_start()
549 mtk_aes_set_mode(aes, rctx); in mtk_aes_start()
550 aes->resume = mtk_aes_transfer_complete; in mtk_aes_start()
552 return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes); in mtk_aes_start()
561 static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_ctr_transfer() argument
563 struct mtk_aes_base_ctx *ctx = aes->ctx; in mtk_aes_ctr_transfer()
565 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); in mtk_aes_ctr_transfer()
572 cctx->offset += aes->total; in mtk_aes_ctr_transfer()
573 if (cctx->offset >= req->nbytes) in mtk_aes_ctr_transfer()
574 return mtk_aes_transfer_complete(cryp, aes); in mtk_aes_ctr_transfer()
577 datalen = req->nbytes - cctx->offset; in mtk_aes_ctr_transfer()
579 ctr = be32_to_cpu(cctx->iv[3]); in mtk_aes_ctr_transfer()
583 end = start + blocks - 1; in mtk_aes_ctr_transfer()
586 datalen = AES_BLOCK_SIZE * -start; in mtk_aes_ctr_transfer()
591 src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset); in mtk_aes_ctr_transfer()
592 dst = ((req->src == req->dst) ? src : in mtk_aes_ctr_transfer()
593 scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset)); in mtk_aes_ctr_transfer()
596 mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv, in mtk_aes_ctr_transfer()
604 cctx->iv[3] = cpu_to_be32(ctr); in mtk_aes_ctr_transfer()
605 crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE); in mtk_aes_ctr_transfer()
608 return mtk_aes_dma(cryp, aes, src, dst, datalen); in mtk_aes_ctr_transfer()
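
The CTR path must split any request that would wrap the 32-bit big-endian counter word: with start near 2^32, end = start + blocks - 1 overflows, and only -start (that is, 2^32 - start, in unsigned arithmetic) counter values remain before the wrap. A worked example in C:

    #include <stdio.h>
    #include <stdint.h>

    #define AES_BLOCK_SIZE 16

    int main(void)
    {
            /* A counter two steps from the 32-bit limit, 4-block request. */
            uint32_t start = 0xfffffffe, blocks = 4;
            uint32_t end = start + blocks - 1;  /* wraps around to 0x00000001 */

            if (end < start) {
                    /* Only -start (= 2^32 - start = 2) counter values remain,
                     * so process two blocks now and resume after the IV is
                     * reloaded, as mtk_aes_ctr_transfer() does. */
                    uint32_t datalen = AES_BLOCK_SIZE * -start;
                    printf("split at %u bytes (%u blocks)\n",
                           (unsigned)datalen, (unsigned)-start);
            }
            return 0;
    }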
611 static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_ctr_start() argument
613 struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx); in mtk_aes_ctr_start()
614 struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq); in mtk_aes_ctr_start()
617 mtk_aes_set_mode(aes, rctx); in mtk_aes_ctr_start()
619 memcpy(cctx->iv, req->info, AES_BLOCK_SIZE); in mtk_aes_ctr_start()
620 cctx->offset = 0; in mtk_aes_ctr_start()
621 aes->total = 0; in mtk_aes_ctr_start()
622 aes->resume = mtk_aes_ctr_transfer; in mtk_aes_ctr_start()
624 return mtk_aes_ctr_transfer(cryp, aes); in mtk_aes_ctr_start()
627 /* Check the AES key and write it into the transform state buffer */
635 ctx->keymode = AES_TFM_128BITS; in mtk_aes_setkey()
638 ctx->keymode = AES_TFM_192BITS; in mtk_aes_setkey()
641 ctx->keymode = AES_TFM_256BITS; in mtk_aes_setkey()
646 return -EINVAL; in mtk_aes_setkey()
649 ctx->keylen = SIZE_IN_WORDS(keylen); in mtk_aes_setkey()
650 mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); in mtk_aes_setkey()
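
The setkey path accepts only the three standard AES key lengths and stores the key length in 32-bit words. A sketch mirroring that switch; SIZE_IN_WORDS is assumed here to convert a byte count to words, which matches how it is used throughout the listing:

    #include <stdio.h>

    /* Assumed definition: bytes to 32-bit words. */
    #define SIZE_IN_WORDS(x)        ((x) / 4)

    /* Mirror of the keylen switch in mtk_aes_setkey(); returns the key
     * width in bits, or -1 where the driver would return -EINVAL. */
    static int aes_key_bits(unsigned int keylen)
    {
            switch (keylen) {
            case 16: return 128;
            case 24: return 192;
            case 32: return 256;
            default: return -1;
            }
    }

    int main(void)
    {
            printf("%d-bit key stored as %u words\n",
                   aes_key_bits(24), (unsigned)SIZE_IN_WORDS(24));
            return 0;
    }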
662 rctx->mode = mode; in mtk_aes_crypt()
664 return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT), in mtk_aes_crypt()
665 &req->base); in mtk_aes_crypt()
703 cryp = mtk_aes_find_dev(&ctx->base); in mtk_aes_cra_init()
706 return -ENODEV; in mtk_aes_cra_init()
709 tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); in mtk_aes_cra_init()
710 ctx->base.start = mtk_aes_start; in mtk_aes_cra_init()
719 cryp = mtk_aes_find_dev(&ctx->base); in mtk_aes_ctr_cra_init()
722 return -ENODEV; in mtk_aes_ctr_cra_init()
725 tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx); in mtk_aes_ctr_cra_init()
726 ctx->base.start = mtk_aes_ctr_start; in mtk_aes_ctr_cra_init()
732 .cra_name = "cbc(aes)",
733 .cra_driver_name = "cbc-aes-mtk",
753 .cra_name = "ecb(aes)",
754 .cra_driver_name = "ecb-aes-mtk",
773 .cra_name = "ctr(aes)",
774 .cra_driver_name = "ctr-aes-mtk",
806 struct mtk_aes_rec *aes) in mtk_aes_gcm_tag_verify() argument
808 u32 status = cryp->ring[aes->id]->res_prev->ct; in mtk_aes_gcm_tag_verify()
810 return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ? in mtk_aes_gcm_tag_verify()
811 -EBADMSG : 0); in mtk_aes_gcm_tag_verify()
814 /* Initialize transform information for GCM mode */
816 struct mtk_aes_rec *aes, in mtk_aes_gcm_info_init() argument
819 struct aead_request *req = aead_request_cast(aes->areq); in mtk_aes_gcm_info_init()
820 struct mtk_aes_base_ctx *ctx = aes->ctx; in mtk_aes_gcm_info_init()
822 struct mtk_aes_info *info = &ctx->info; in mtk_aes_gcm_info_init()
826 ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); in mtk_aes_gcm_info_init()
828 info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen); in mtk_aes_gcm_info_init()
829 info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen); in mtk_aes_gcm_info_init()
830 info->cmd[cnt++] = AES_GCM_CMD2; in mtk_aes_gcm_info_init()
831 info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen); in mtk_aes_gcm_info_init()
833 if (aes->flags & AES_FLAGS_ENCRYPT) { in mtk_aes_gcm_info_init()
834 info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize); in mtk_aes_gcm_info_init()
835 info->tfm[0] = AES_TFM_GCM_OUT; in mtk_aes_gcm_info_init()
837 info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize); in mtk_aes_gcm_info_init()
838 info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize); in mtk_aes_gcm_info_init()
839 info->tfm[0] = AES_TFM_GCM_IN; in mtk_aes_gcm_info_init()
841 ctx->ct_size = cnt; in mtk_aes_gcm_info_init()
843 info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE( in mtk_aes_gcm_info_init()
844 ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) | in mtk_aes_gcm_info_init()
845 ctx->keymode; in mtk_aes_gcm_info_init()
846 info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV | in mtk_aes_gcm_info_init()
849 mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS( in mtk_aes_gcm_info_init()
850 AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize); in mtk_aes_gcm_info_init()
853 static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, in mtk_aes_gcm_dma() argument
859 aes->src.sg = src; in mtk_aes_gcm_dma()
860 aes->dst.sg = dst; in mtk_aes_gcm_dma()
861 aes->real_dst = dst; in mtk_aes_gcm_dma()
863 src_aligned = mtk_aes_check_aligned(src, len, &aes->src); in mtk_aes_gcm_dma()
867 dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst); in mtk_aes_gcm_dma()
870 if (aes->total > AES_BUF_SIZE) in mtk_aes_gcm_dma()
871 return mtk_aes_complete(cryp, aes, -ENOMEM); in mtk_aes_gcm_dma()
874 sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); in mtk_aes_gcm_dma()
875 aes->src.sg = &aes->aligned_sg; in mtk_aes_gcm_dma()
876 aes->src.nents = 1; in mtk_aes_gcm_dma()
877 aes->src.remainder = 0; in mtk_aes_gcm_dma()
881 aes->dst.sg = &aes->aligned_sg; in mtk_aes_gcm_dma()
882 aes->dst.nents = 1; in mtk_aes_gcm_dma()
883 aes->dst.remainder = 0; in mtk_aes_gcm_dma()
886 sg_init_table(&aes->aligned_sg, 1); in mtk_aes_gcm_dma()
887 sg_set_buf(&aes->aligned_sg, aes->buf, aes->total); in mtk_aes_gcm_dma()
890 mtk_aes_gcm_info_init(cryp, aes, len); in mtk_aes_gcm_dma()
892 return mtk_aes_map(cryp, aes); in mtk_aes_gcm_dma()
896 static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) in mtk_aes_gcm_start() argument
898 struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx); in mtk_aes_gcm_start()
899 struct aead_request *req = aead_request_cast(aes->areq); in mtk_aes_gcm_start()
901 u32 len = req->assoclen + req->cryptlen; in mtk_aes_gcm_start()
903 mtk_aes_set_mode(aes, rctx); in mtk_aes_gcm_start()
905 if (aes->flags & AES_FLAGS_ENCRYPT) { in mtk_aes_gcm_start()
908 aes->resume = mtk_aes_transfer_complete; in mtk_aes_gcm_start()
910 aes->total = len + gctx->authsize; in mtk_aes_gcm_start()
912 gctx->textlen = req->cryptlen; in mtk_aes_gcm_start()
914 scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); in mtk_aes_gcm_start()
916 aes->resume = mtk_aes_gcm_tag_verify; in mtk_aes_gcm_start()
917 aes->total = len; in mtk_aes_gcm_start()
918 gctx->textlen = req->cryptlen - gctx->authsize; in mtk_aes_gcm_start()
921 return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); in mtk_aes_gcm_start()
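
The two branches of mtk_aes_gcm_start() differ only in where the authentication tag lives: on encryption the engine appends it, so the DMA total is len + authsize and all of cryptlen is plaintext; on decryption the tag is already included in cryptlen, so the recovered text is authsize bytes shorter. A worked example of that bookkeeping:

    #include <stdio.h>

    int main(void)
    {
            unsigned int assoclen = 20, cryptlen = 64, authsize = 16;
            unsigned int len = assoclen + cryptlen;

            /* Encrypt: engine appends the tag, DMA total grows by authsize. */
            unsigned int enc_total = len + authsize;        /* 100 */
            unsigned int enc_text  = cryptlen;              /* 64  */

            /* Decrypt: tag is already inside cryptlen, plaintext shrinks. */
            unsigned int dec_total = len;                   /* 84 */
            unsigned int dec_text  = cryptlen - authsize;   /* 48 */

            printf("enc: total=%u text=%u\n", enc_total, enc_text);
            printf("dec: total=%u text=%u\n", dec_total, dec_text);
            return 0;
    }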
931 if (!gctx->textlen && !req->assoclen) in mtk_aes_gcm_crypt()
932 return -EINVAL; in mtk_aes_gcm_crypt()
934 rctx->mode = AES_FLAGS_GCM | mode; in mtk_aes_gcm_crypt()
936 return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT), in mtk_aes_gcm_crypt()
937 &req->base); in mtk_aes_gcm_crypt()
941 * Because of a hardware limitation, we need to pre-calculate the GHASH key H
950 struct crypto_skcipher *ctr = gctx->ctr; in mtk_aes_gcm_setkey()
964 ctx->keymode = AES_TFM_128BITS; in mtk_aes_gcm_setkey()
967 ctx->keymode = AES_TFM_192BITS; in mtk_aes_gcm_setkey()
970 ctx->keymode = AES_TFM_256BITS; in mtk_aes_gcm_setkey()
975 return -EINVAL; in mtk_aes_gcm_setkey()
978 ctx->keylen = SIZE_IN_WORDS(keylen); in mtk_aes_gcm_setkey()
980 /* Same as crypto_gcm_setkey() from crypto/gcm.c */ in mtk_aes_gcm_setkey()
993 return -ENOMEM; in mtk_aes_gcm_setkey()
995 crypto_init_wait(&data->wait); in mtk_aes_gcm_setkey()
996 sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE); in mtk_aes_gcm_setkey()
997 skcipher_request_set_tfm(&data->req, ctr); in mtk_aes_gcm_setkey()
998 skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | in mtk_aes_gcm_setkey()
1000 crypto_req_done, &data->wait); in mtk_aes_gcm_setkey()
1001 skcipher_request_set_crypt(&data->req, data->sg, data->sg, in mtk_aes_gcm_setkey()
1002 AES_BLOCK_SIZE, data->iv); in mtk_aes_gcm_setkey()
1004 err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), in mtk_aes_gcm_setkey()
1005 &data->wait); in mtk_aes_gcm_setkey()
1010 mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen); in mtk_aes_gcm_setkey()
1012 mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash, in mtk_aes_gcm_setkey()
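
The setkey path derives the GHASH key H in software because the engine expects it pre-computed: a one-block ctr(aes) encryption of an all-zero block with a zero counter reduces to H = E_K(0^128), exactly what the crypto_wait_req() call above produces in data->hash. A standalone sketch of the same derivation, using OpenSSL's legacy AES API purely for illustration (the driver itself uses the kernel's ctr(aes) skcipher):

    #include <stdio.h>
    #include <openssl/aes.h>

    int main(void)
    {
            unsigned char key[16] = { 0 };  /* the AES key handed to setkey() */
            unsigned char zero[16] = { 0 };
            unsigned char H[16];
            AES_KEY enc;
            int i;

            AES_set_encrypt_key(key, 128, &enc);
            AES_encrypt(zero, H, &enc);     /* H = E_K(0^128) */

            for (i = 0; i < 16; i++)
                    printf("%02x", H[i]);
            printf("\n");
            return 0;
    }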
1025 /* Same as crypto_gcm_authsize() from crypto/gcm.c */ in mtk_aes_gcm_setauthsize()
1032 return -EINVAL; in mtk_aes_gcm_setauthsize()
1035 gctx->authsize = authsize; in mtk_aes_gcm_setauthsize()
1054 cryp = mtk_aes_find_dev(&ctx->base); in mtk_aes_gcm_init()
1057 return -ENODEV; in mtk_aes_gcm_init()
1060 ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0, in mtk_aes_gcm_init()
1062 if (IS_ERR(ctx->ctr)) { in mtk_aes_gcm_init()
1063 pr_err("Error allocating ctr(aes)\n"); in mtk_aes_gcm_init()
1064 return PTR_ERR(ctx->ctr); in mtk_aes_gcm_init()
1068 ctx->base.start = mtk_aes_gcm_start; in mtk_aes_gcm_init()
1076 crypto_free_skcipher(ctx->ctr); in mtk_aes_gcm_exit()
1090 .cra_name = "gcm(aes)",
1091 .cra_driver_name = "gcm-aes-mtk",
1103 struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data; in mtk_aes_queue_task() local
1105 mtk_aes_handle_queue(aes->cryp, aes->id, NULL); in mtk_aes_queue_task()
1110 struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data; in mtk_aes_done_task() local
1111 struct mtk_cryp *cryp = aes->cryp; in mtk_aes_done_task()
1113 mtk_aes_unmap(cryp, aes); in mtk_aes_done_task()
1114 aes->resume(cryp, aes); in mtk_aes_done_task()
1119 struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id; in mtk_aes_irq() local
1120 struct mtk_cryp *cryp = aes->cryp; in mtk_aes_irq()
1121 u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id)); in mtk_aes_irq()
1123 mtk_aes_write(cryp, RDR_STAT(aes->id), val); in mtk_aes_irq()
1125 if (likely(AES_FLAGS_BUSY & aes->flags)) { in mtk_aes_irq()
1126 mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST); in mtk_aes_irq()
1127 mtk_aes_write(cryp, RDR_THRESH(aes->id), in mtk_aes_irq()
1130 tasklet_schedule(&aes->done_task); in mtk_aes_irq()
1132 dev_warn(cryp->dev, "AES interrupt with no active requests.\n"); in mtk_aes_irq()
1145 struct mtk_aes_rec **aes = cryp->aes; in mtk_aes_record_init() local
1146 int i, err = -ENOMEM; in mtk_aes_record_init()
1149 aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL); in mtk_aes_record_init()
1150 if (!aes[i]) in mtk_aes_record_init()
1153 aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL, in mtk_aes_record_init()
1155 if (!aes[i]->buf) in mtk_aes_record_init()
1158 aes[i]->cryp = cryp; in mtk_aes_record_init()
1160 spin_lock_init(&aes[i]->lock); in mtk_aes_record_init()
1161 crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE); in mtk_aes_record_init()
1163 tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task, in mtk_aes_record_init()
1164 (unsigned long)aes[i]); in mtk_aes_record_init()
1165 tasklet_init(&aes[i]->done_task, mtk_aes_done_task, in mtk_aes_record_init()
1166 (unsigned long)aes[i]); in mtk_aes_record_init()
1170 aes[0]->id = MTK_RING0; in mtk_aes_record_init()
1171 aes[1]->id = MTK_RING1; in mtk_aes_record_init()
1176 for (; i--; ) { in mtk_aes_record_init()
1177 free_page((unsigned long)aes[i]->buf); in mtk_aes_record_init()
1178 kfree(aes[i]); in mtk_aes_record_init()
1189 tasklet_kill(&cryp->aes[i]->done_task); in mtk_aes_record_free()
1190 tasklet_kill(&cryp->aes[i]->queue_task); in mtk_aes_record_free()
1192 free_page((unsigned long)cryp->aes[i]->buf); in mtk_aes_record_free()
1193 kfree(cryp->aes[i]); in mtk_aes_record_free()
1224 for (; i--; ) in mtk_aes_register_algs()
1234 INIT_LIST_HEAD(&cryp->aes_list); in mtk_cipher_alg_register()
1241 ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq, in mtk_cipher_alg_register()
1242 0, "mtk-aes", cryp->aes[0]); in mtk_cipher_alg_register()
1244 dev_err(cryp->dev, "unable to request AES irq.\n"); in mtk_cipher_alg_register()
1248 ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq, in mtk_cipher_alg_register()
1249 0, "mtk-aes", cryp->aes[1]); in mtk_cipher_alg_register()
1251 dev_err(cryp->dev, "unable to request AES irq.\n"); in mtk_cipher_alg_register()
1260 list_add_tail(&cryp->aes_list, &mtk_aes.dev_list); in mtk_cipher_alg_register()
1271 list_del(&cryp->aes_list); in mtk_cipher_alg_register()
1277 dev_err(cryp->dev, "mtk-aes initialization failed.\n"); in mtk_cipher_alg_register()
1284 list_del(&cryp->aes_list); in mtk_cipher_alg_release()