Lines Matching +full:frc +full:- +full:shared
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2019 NXP
18 #include "dpseci-debugfs.h"
20 #include <soc/fsl/dpaa2-io.h>
21 #include <soc/fsl/dpaa2-fd.h>
35 * being processed. This can be added by the dpaa2-eth driver. This would
38 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here
64 * struct caam_ctx - per-session context
94 phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) : in dpaa2_caam_iova_to_virt()
101 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
108 * @flags - flags that would be used for the equivalent kmalloc(..) call
118 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
120 * @obj - buffer previously allocated by qi_cache_zalloc
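A minimal usage sketch of this allocate/free pair (hypothetical caller, not taken from the driver): hot-path code pulls a zeroed buffer from the memcache and the completion path hands it back.

	/* hypothetical hot path; GFP_ATOMIC when the caller cannot sleep */
	struct aead_edesc *edesc = qi_cache_zalloc(flags);

	if (!edesc)
		return ERR_PTR(-ENOMEM);

	/* ... populate the extended descriptor and enqueue the job ... */

	/* hypothetical completion path: return the buffer to the memcache */
	qi_cache_free(edesc);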
132 switch (crypto_tfm_alg_type(areq->tfm)) { in to_caam_req()
141 return ERR_PTR(-EINVAL); in to_caam_req()
173 struct device *dev = ctx->dev; in aead_set_sh_desc()
181 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == in aead_set_sh_desc()
183 const bool is_rfc3686 = alg->caam.rfc3686; in aead_set_sh_desc()
185 if (!ctx->cdata.keylen || !ctx->authsize) in aead_set_sh_desc()
189 * AES-CTR needs to load IV in CONTEXT1 reg in aead_set_sh_desc()
202 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + in aead_set_sh_desc()
203 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); in aead_set_sh_desc()
212 ctx->adata.key_virt = ctx->key; in aead_set_sh_desc()
213 ctx->adata.key_dma = ctx->key_dma; in aead_set_sh_desc()
215 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; in aead_set_sh_desc()
216 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; in aead_set_sh_desc()
218 data_len[0] = ctx->adata.keylen_pad; in aead_set_sh_desc()
219 data_len[1] = ctx->cdata.keylen; in aead_set_sh_desc()
221 /* aead_encrypt shared descriptor */ in aead_set_sh_desc()
222 if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN : in aead_set_sh_desc()
227 return -EINVAL; in aead_set_sh_desc()
229 ctx->adata.key_inline = !!(inl_mask & 1); in aead_set_sh_desc()
230 ctx->cdata.key_inline = !!(inl_mask & 2); in aead_set_sh_desc()
232 flc = &ctx->flc[ENCRYPT]; in aead_set_sh_desc()
233 desc = flc->sh_desc; in aead_set_sh_desc()
235 if (alg->caam.geniv) in aead_set_sh_desc()
236 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, in aead_set_sh_desc()
237 ivsize, ctx->authsize, is_rfc3686, in aead_set_sh_desc()
239 priv->sec_attr.era); in aead_set_sh_desc()
241 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, in aead_set_sh_desc()
242 ivsize, ctx->authsize, is_rfc3686, nonce, in aead_set_sh_desc()
243 ctx1_iv_off, true, priv->sec_attr.era); in aead_set_sh_desc()
245 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in aead_set_sh_desc()
246 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in aead_set_sh_desc()
247 sizeof(flc->flc) + desc_bytes(desc), in aead_set_sh_desc()
248 ctx->dir); in aead_set_sh_desc()
250 /* aead_decrypt shared descriptor */ in aead_set_sh_desc()
255 return -EINVAL; in aead_set_sh_desc()
257 ctx->adata.key_inline = !!(inl_mask & 1); in aead_set_sh_desc()
258 ctx->cdata.key_inline = !!(inl_mask & 2); in aead_set_sh_desc()
260 flc = &ctx->flc[DECRYPT]; in aead_set_sh_desc()
261 desc = flc->sh_desc; in aead_set_sh_desc()
262 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, in aead_set_sh_desc()
263 ivsize, ctx->authsize, alg->caam.geniv, in aead_set_sh_desc()
265 priv->sec_attr.era); in aead_set_sh_desc()
266 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in aead_set_sh_desc()
267 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in aead_set_sh_desc()
268 sizeof(flc->flc) + desc_bytes(desc), in aead_set_sh_desc()
269 ctx->dir); in aead_set_sh_desc()
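Every *_set_sh_desc() in this file ends with the same finalize step once a shared descriptor has been constructed; a condensed sketch of that pattern (illustrative only, not a helper that exists in the driver):

	struct caam_flc *flc = &ctx->flc[ENCRYPT];
	u32 *desc = flc->sh_desc;

	/* build the descriptor via cnstr_shdsc_*(), then record its length (SDL) */
	flc->flc[1] = cpu_to_caam32(desc_len(desc));
	/* push the CPU-side copy of the flow context + descriptor to the device */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);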
278 ctx->authsize = authsize; in aead_setauthsize()
288 struct device *dev = ctx->dev; in aead_setkey()
300 ctx->adata.keylen = keys.authkeylen; in aead_setkey()
301 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & in aead_setkey()
304 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) in aead_setkey()
307 memcpy(ctx->key, keys.authkey, keys.authkeylen); in aead_setkey()
308 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); in aead_setkey()
309 dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + in aead_setkey()
310 keys.enckeylen, ctx->dir); in aead_setkey()
312 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, in aead_setkey()
313 ctx->adata.keylen_pad + keys.enckeylen, 1); in aead_setkey()
315 ctx->cdata.keylen = keys.enckeylen; in aead_setkey()
321 return -EINVAL; in aead_setkey()
334 err = -EINVAL; in des3_aead_setkey()
351 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in aead_edesc_alloc()
352 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in aead_edesc_alloc()
356 struct device *dev = ctx->dev; in aead_edesc_alloc()
357 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in aead_edesc_alloc()
364 unsigned int authsize = ctx->authsize; in aead_edesc_alloc()
373 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
376 if (unlikely(req->dst != req->src)) { in aead_edesc_alloc()
377 src_len = req->assoclen + req->cryptlen; in aead_edesc_alloc()
378 dst_len = src_len + (encrypt ? authsize : (-authsize)); in aead_edesc_alloc()
380 src_nents = sg_nents_for_len(req->src, src_len); in aead_edesc_alloc()
388 dst_nents = sg_nents_for_len(req->dst, dst_len); in aead_edesc_alloc()
397 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in aead_edesc_alloc()
402 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
409 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, in aead_edesc_alloc()
413 dma_unmap_sg(dev, req->src, src_nents, in aead_edesc_alloc()
416 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
422 src_len = req->assoclen + req->cryptlen + in aead_edesc_alloc()
425 src_nents = sg_nents_for_len(req->src, src_len); in aead_edesc_alloc()
433 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in aead_edesc_alloc()
438 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
442 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) in aead_edesc_alloc()
446 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. in aead_edesc_alloc()
460 else if ((req->src == req->dst) && (mapped_src_nents > 1)) in aead_edesc_alloc()
467 sg_table = &edesc->sgt[0]; in aead_edesc_alloc()
473 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in aead_edesc_alloc()
476 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
483 memcpy(iv, req->iv, ivsize); in aead_edesc_alloc()
488 caam_unmap(dev, req->src, req->dst, src_nents, in aead_edesc_alloc()
491 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
495 edesc->src_nents = src_nents; in aead_edesc_alloc()
496 edesc->dst_nents = dst_nents; in aead_edesc_alloc()
497 edesc->iv_dma = iv_dma; in aead_edesc_alloc()
499 if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) == in aead_edesc_alloc()
505 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize); in aead_edesc_alloc()
507 edesc->assoclen = cpu_to_caam32(req->assoclen); in aead_edesc_alloc()
508 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, in aead_edesc_alloc()
510 if (dma_mapping_error(dev, edesc->assoclen_dma)) { in aead_edesc_alloc()
512 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
515 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
518 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); in aead_edesc_alloc()
524 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); in aead_edesc_alloc()
528 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); in aead_edesc_alloc()
533 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_edesc_alloc()
534 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, in aead_edesc_alloc()
537 return ERR_PTR(-ENOMEM); in aead_edesc_alloc()
540 edesc->qm_sg_dma = qm_sg_dma; in aead_edesc_alloc()
541 edesc->qm_sg_bytes = qm_sg_bytes; in aead_edesc_alloc()
543 out_len = req->assoclen + req->cryptlen + in aead_edesc_alloc()
544 (encrypt ? ctx->authsize : (-ctx->authsize)); in aead_edesc_alloc()
545 in_len = 4 + ivsize + req->assoclen + req->cryptlen; in aead_edesc_alloc()
547 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in aead_edesc_alloc()
553 if (req->dst == req->src) { in aead_edesc_alloc()
556 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); in aead_edesc_alloc()
572 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); in aead_edesc_alloc()
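Sketch of the S/G table layout that aead_edesc_alloc() builds (indices assumed from the surrounding calls; the IV slot is only present when an IV is prepended to the input):

	/*
	 * sg_table[0]  - the 4-byte assoclen word, mapped separately
	 * sg_table[1]  - the IV, when one is prepended
	 * sg_table[2+] - req->src entries, then req->dst entries if dst != src
	 */
	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);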
589 struct device *dev = ctx->dev; in chachapoly_set_sh_desc()
593 if (!ctx->cdata.keylen || !ctx->authsize) in chachapoly_set_sh_desc()
596 flc = &ctx->flc[ENCRYPT]; in chachapoly_set_sh_desc()
597 desc = flc->sh_desc; in chachapoly_set_sh_desc()
598 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, in chachapoly_set_sh_desc()
599 ctx->authsize, true, true); in chachapoly_set_sh_desc()
600 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in chachapoly_set_sh_desc()
601 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in chachapoly_set_sh_desc()
602 sizeof(flc->flc) + desc_bytes(desc), in chachapoly_set_sh_desc()
603 ctx->dir); in chachapoly_set_sh_desc()
605 flc = &ctx->flc[DECRYPT]; in chachapoly_set_sh_desc()
606 desc = flc->sh_desc; in chachapoly_set_sh_desc()
607 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, in chachapoly_set_sh_desc()
608 ctx->authsize, false, true); in chachapoly_set_sh_desc()
609 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in chachapoly_set_sh_desc()
610 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in chachapoly_set_sh_desc()
611 sizeof(flc->flc) + desc_bytes(desc), in chachapoly_set_sh_desc()
612 ctx->dir); in chachapoly_set_sh_desc()
623 return -EINVAL; in chachapoly_setauthsize()
625 ctx->authsize = authsize; in chachapoly_setauthsize()
634 unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; in chachapoly_setkey()
637 return -EINVAL; in chachapoly_setkey()
639 memcpy(ctx->key, key, keylen); in chachapoly_setkey()
640 ctx->cdata.key_virt = ctx->key; in chachapoly_setkey()
641 ctx->cdata.keylen = keylen - saltlen; in chachapoly_setkey()
649 struct device *dev = ctx->dev; in gcm_set_sh_desc()
653 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - in gcm_set_sh_desc()
654 ctx->cdata.keylen; in gcm_set_sh_desc()
656 if (!ctx->cdata.keylen || !ctx->authsize) in gcm_set_sh_desc()
660 * AES GCM encrypt shared descriptor in gcm_set_sh_desc()
661 * Job Descriptor and Shared Descriptor in gcm_set_sh_desc()
662 * must fit into the 64-word Descriptor h/w Buffer in gcm_set_sh_desc()
665 ctx->cdata.key_inline = true; in gcm_set_sh_desc()
666 ctx->cdata.key_virt = ctx->key; in gcm_set_sh_desc()
668 ctx->cdata.key_inline = false; in gcm_set_sh_desc()
669 ctx->cdata.key_dma = ctx->key_dma; in gcm_set_sh_desc()
672 flc = &ctx->flc[ENCRYPT]; in gcm_set_sh_desc()
673 desc = flc->sh_desc; in gcm_set_sh_desc()
674 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); in gcm_set_sh_desc()
675 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in gcm_set_sh_desc()
676 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in gcm_set_sh_desc()
677 sizeof(flc->flc) + desc_bytes(desc), in gcm_set_sh_desc()
678 ctx->dir); in gcm_set_sh_desc()
681 * Job Descriptor and Shared Descriptors in gcm_set_sh_desc()
682 * must all fit into the 64-word Descriptor h/w Buffer in gcm_set_sh_desc()
685 ctx->cdata.key_inline = true; in gcm_set_sh_desc()
686 ctx->cdata.key_virt = ctx->key; in gcm_set_sh_desc()
688 ctx->cdata.key_inline = false; in gcm_set_sh_desc()
689 ctx->cdata.key_dma = ctx->key_dma; in gcm_set_sh_desc()
692 flc = &ctx->flc[DECRYPT]; in gcm_set_sh_desc()
693 desc = flc->sh_desc; in gcm_set_sh_desc()
694 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); in gcm_set_sh_desc()
695 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in gcm_set_sh_desc()
696 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in gcm_set_sh_desc()
697 sizeof(flc->flc) + desc_bytes(desc), in gcm_set_sh_desc()
698 ctx->dir); in gcm_set_sh_desc()
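The key_inline assignments above sit inside a length check that the listing elides; a sketch of the encrypt-side decision, with the threshold macro assumed from the caamalg descriptor headers:

	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		/* enough room left in the 64-word buffer: inline the key */
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		/* otherwise reference the key by its DMA address */
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}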
712 ctx->authsize = authsize; in gcm_setauthsize()
722 struct device *dev = ctx->dev; in gcm_setkey()
731 memcpy(ctx->key, key, keylen); in gcm_setkey()
732 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir); in gcm_setkey()
733 ctx->cdata.keylen = keylen; in gcm_setkey()
741 struct device *dev = ctx->dev; in rfc4106_set_sh_desc()
745 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - in rfc4106_set_sh_desc()
746 ctx->cdata.keylen; in rfc4106_set_sh_desc()
748 if (!ctx->cdata.keylen || !ctx->authsize) in rfc4106_set_sh_desc()
751 ctx->cdata.key_virt = ctx->key; in rfc4106_set_sh_desc()
754 * RFC4106 encrypt shared descriptor in rfc4106_set_sh_desc()
755 * Job Descriptor and Shared Descriptor in rfc4106_set_sh_desc()
756 * must fit into the 64-word Descriptor h/w Buffer in rfc4106_set_sh_desc()
759 ctx->cdata.key_inline = true; in rfc4106_set_sh_desc()
761 ctx->cdata.key_inline = false; in rfc4106_set_sh_desc()
762 ctx->cdata.key_dma = ctx->key_dma; in rfc4106_set_sh_desc()
765 flc = &ctx->flc[ENCRYPT]; in rfc4106_set_sh_desc()
766 desc = flc->sh_desc; in rfc4106_set_sh_desc()
767 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4106_set_sh_desc()
769 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4106_set_sh_desc()
770 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in rfc4106_set_sh_desc()
771 sizeof(flc->flc) + desc_bytes(desc), in rfc4106_set_sh_desc()
772 ctx->dir); in rfc4106_set_sh_desc()
775 * Job Descriptor and Shared Descriptors in rfc4106_set_sh_desc()
776 * must all fit into the 64-word Descriptor h/w Buffer in rfc4106_set_sh_desc()
779 ctx->cdata.key_inline = true; in rfc4106_set_sh_desc()
781 ctx->cdata.key_inline = false; in rfc4106_set_sh_desc()
782 ctx->cdata.key_dma = ctx->key_dma; in rfc4106_set_sh_desc()
785 flc = &ctx->flc[DECRYPT]; in rfc4106_set_sh_desc()
786 desc = flc->sh_desc; in rfc4106_set_sh_desc()
787 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4106_set_sh_desc()
789 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4106_set_sh_desc()
790 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in rfc4106_set_sh_desc()
791 sizeof(flc->flc) + desc_bytes(desc), in rfc4106_set_sh_desc()
792 ctx->dir); in rfc4106_set_sh_desc()
807 ctx->authsize = authsize; in rfc4106_setauthsize()
817 struct device *dev = ctx->dev; in rfc4106_setkey()
820 ret = aes_check_keylen(keylen - 4); in rfc4106_setkey()
827 memcpy(ctx->key, key, keylen); in rfc4106_setkey()
832 ctx->cdata.keylen = keylen - 4; in rfc4106_setkey()
833 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, in rfc4106_setkey()
834 ctx->dir); in rfc4106_setkey()
842 struct device *dev = ctx->dev; in rfc4543_set_sh_desc()
846 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - in rfc4543_set_sh_desc()
847 ctx->cdata.keylen; in rfc4543_set_sh_desc()
849 if (!ctx->cdata.keylen || !ctx->authsize) in rfc4543_set_sh_desc()
852 ctx->cdata.key_virt = ctx->key; in rfc4543_set_sh_desc()
855 * RFC4543 encrypt shared descriptor in rfc4543_set_sh_desc()
856 * Job Descriptor and Shared Descriptor in rfc4543_set_sh_desc()
857 * must fit into the 64-word Descriptor h/w Buffer in rfc4543_set_sh_desc()
860 ctx->cdata.key_inline = true; in rfc4543_set_sh_desc()
862 ctx->cdata.key_inline = false; in rfc4543_set_sh_desc()
863 ctx->cdata.key_dma = ctx->key_dma; in rfc4543_set_sh_desc()
866 flc = &ctx->flc[ENCRYPT]; in rfc4543_set_sh_desc()
867 desc = flc->sh_desc; in rfc4543_set_sh_desc()
868 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4543_set_sh_desc()
870 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4543_set_sh_desc()
871 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in rfc4543_set_sh_desc()
872 sizeof(flc->flc) + desc_bytes(desc), in rfc4543_set_sh_desc()
873 ctx->dir); in rfc4543_set_sh_desc()
876 * Job Descriptor and Shared Descriptors in rfc4543_set_sh_desc()
877 * must all fit into the 64-word Descriptor h/w Buffer in rfc4543_set_sh_desc()
880 ctx->cdata.key_inline = true; in rfc4543_set_sh_desc()
882 ctx->cdata.key_inline = false; in rfc4543_set_sh_desc()
883 ctx->cdata.key_dma = ctx->key_dma; in rfc4543_set_sh_desc()
886 flc = &ctx->flc[DECRYPT]; in rfc4543_set_sh_desc()
887 desc = flc->sh_desc; in rfc4543_set_sh_desc()
888 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, in rfc4543_set_sh_desc()
890 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in rfc4543_set_sh_desc()
891 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in rfc4543_set_sh_desc()
892 sizeof(flc->flc) + desc_bytes(desc), in rfc4543_set_sh_desc()
893 ctx->dir); in rfc4543_set_sh_desc()
904 return -EINVAL; in rfc4543_setauthsize()
906 ctx->authsize = authsize; in rfc4543_setauthsize()
916 struct device *dev = ctx->dev; in rfc4543_setkey()
919 ret = aes_check_keylen(keylen - 4); in rfc4543_setkey()
926 memcpy(ctx->key, key, keylen); in rfc4543_setkey()
931 ctx->cdata.keylen = keylen - 4; in rfc4543_setkey()
932 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, in rfc4543_setkey()
933 ctx->dir); in rfc4543_setkey()
945 struct device *dev = ctx->dev; in skcipher_setkey()
949 const bool is_rfc3686 = alg->caam.rfc3686; in skcipher_setkey()
954 ctx->cdata.keylen = keylen; in skcipher_setkey()
955 ctx->cdata.key_virt = key; in skcipher_setkey()
956 ctx->cdata.key_inline = true; in skcipher_setkey()
958 /* skcipher_encrypt shared descriptor */ in skcipher_setkey()
959 flc = &ctx->flc[ENCRYPT]; in skcipher_setkey()
960 desc = flc->sh_desc; in skcipher_setkey()
961 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, in skcipher_setkey()
963 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in skcipher_setkey()
964 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in skcipher_setkey()
965 sizeof(flc->flc) + desc_bytes(desc), in skcipher_setkey()
966 ctx->dir); in skcipher_setkey()
968 /* skcipher_decrypt shared descriptor */ in skcipher_setkey()
969 flc = &ctx->flc[DECRYPT]; in skcipher_setkey()
970 desc = flc->sh_desc; in skcipher_setkey()
971 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, in skcipher_setkey()
973 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in skcipher_setkey()
974 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in skcipher_setkey()
975 sizeof(flc->flc) + desc_bytes(desc), in skcipher_setkey()
976 ctx->dir); in skcipher_setkey()
1005 keylen -= CTR_RFC3686_NONCE_SIZE; in rfc3686_skcipher_setkey()
1021 * AES-CTR needs to load IV in CONTEXT1 reg in ctr_skcipher_setkey()
1038 return -EINVAL; in chacha20_skcipher_setkey()
1061 struct device *dev = ctx->dev; in xts_skcipher_setkey()
1074 ctx->xts_key_fallback = true; in xts_skcipher_setkey()
1076 if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) { in xts_skcipher_setkey()
1077 err = crypto_skcipher_setkey(ctx->fallback, key, keylen); in xts_skcipher_setkey()
1082 ctx->cdata.keylen = keylen; in xts_skcipher_setkey()
1083 ctx->cdata.key_virt = key; in xts_skcipher_setkey()
1084 ctx->cdata.key_inline = true; in xts_skcipher_setkey()
1086 /* xts_skcipher_encrypt shared descriptor */ in xts_skcipher_setkey()
1087 flc = &ctx->flc[ENCRYPT]; in xts_skcipher_setkey()
1088 desc = flc->sh_desc; in xts_skcipher_setkey()
1089 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata); in xts_skcipher_setkey()
1090 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in xts_skcipher_setkey()
1091 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], in xts_skcipher_setkey()
1092 sizeof(flc->flc) + desc_bytes(desc), in xts_skcipher_setkey()
1093 ctx->dir); in xts_skcipher_setkey()
1095 /* xts_skcipher_decrypt shared descriptor */ in xts_skcipher_setkey()
1096 flc = &ctx->flc[DECRYPT]; in xts_skcipher_setkey()
1097 desc = flc->sh_desc; in xts_skcipher_setkey()
1098 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata); in xts_skcipher_setkey()
1099 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in xts_skcipher_setkey()
1100 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], in xts_skcipher_setkey()
1101 sizeof(flc->flc) + desc_bytes(desc), in xts_skcipher_setkey()
1102 ctx->dir); in xts_skcipher_setkey()
1111 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in skcipher_edesc_alloc()
1112 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in skcipher_edesc_alloc()
1114 struct device *dev = ctx->dev; in skcipher_edesc_alloc()
1115 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in skcipher_edesc_alloc()
1125 src_nents = sg_nents_for_len(req->src, req->cryptlen); in skcipher_edesc_alloc()
1128 req->cryptlen); in skcipher_edesc_alloc()
1132 if (unlikely(req->dst != req->src)) { in skcipher_edesc_alloc()
1133 dst_nents = sg_nents_for_len(req->dst, req->cryptlen); in skcipher_edesc_alloc()
1136 req->cryptlen); in skcipher_edesc_alloc()
1140 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in skcipher_edesc_alloc()
1144 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1147 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, in skcipher_edesc_alloc()
1151 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); in skcipher_edesc_alloc()
1152 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1155 mapped_src_nents = dma_map_sg(dev, req->src, src_nents, in skcipher_edesc_alloc()
1159 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1174 if (req->src != req->dst) in skcipher_edesc_alloc()
1184 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1186 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1193 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1195 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1199 sg_table = &edesc->sgt[0]; in skcipher_edesc_alloc()
1201 memcpy(iv, req->iv, ivsize); in skcipher_edesc_alloc()
1206 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, in skcipher_edesc_alloc()
1209 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1212 edesc->src_nents = src_nents; in skcipher_edesc_alloc()
1213 edesc->dst_nents = dst_nents; in skcipher_edesc_alloc()
1214 edesc->iv_dma = iv_dma; in skcipher_edesc_alloc()
1215 edesc->qm_sg_bytes = qm_sg_bytes; in skcipher_edesc_alloc()
1218 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); in skcipher_edesc_alloc()
1220 if (req->src != req->dst) in skcipher_edesc_alloc()
1221 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); in skcipher_edesc_alloc()
1226 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, in skcipher_edesc_alloc()
1228 if (dma_mapping_error(dev, edesc->qm_sg_dma)) { in skcipher_edesc_alloc()
1230 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, in skcipher_edesc_alloc()
1233 return ERR_PTR(-ENOMEM); in skcipher_edesc_alloc()
1236 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in skcipher_edesc_alloc()
1238 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); in skcipher_edesc_alloc()
1239 dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); in skcipher_edesc_alloc()
1242 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in skcipher_edesc_alloc()
1246 if (req->src == req->dst) in skcipher_edesc_alloc()
1247 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + in skcipher_edesc_alloc()
1250 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * in skcipher_edesc_alloc()
1262 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, in aead_unmap()
1263 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, in aead_unmap()
1264 edesc->qm_sg_bytes); in aead_unmap()
1265 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); in aead_unmap()
1274 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, in skcipher_unmap()
1275 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, in skcipher_unmap()
1276 edesc->qm_sg_bytes); in skcipher_unmap()
1285 struct aead_edesc *edesc = req_ctx->edesc; in aead_encrypt_done()
1290 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in aead_encrypt_done()
1293 ecode = caam_qi2_strstatus(ctx->dev, status); in aead_encrypt_done()
1295 aead_unmap(ctx->dev, edesc, req); in aead_encrypt_done()
1306 struct aead_edesc *edesc = req_ctx->edesc; in aead_decrypt_done()
1311 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in aead_decrypt_done()
1314 ecode = caam_qi2_strstatus(ctx->dev, status); in aead_decrypt_done()
1316 aead_unmap(ctx->dev, edesc, req); in aead_decrypt_done()
1334 caam_req->flc = &ctx->flc[ENCRYPT]; in aead_encrypt()
1335 caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; in aead_encrypt()
1336 caam_req->cbk = aead_encrypt_done; in aead_encrypt()
1337 caam_req->ctx = &req->base; in aead_encrypt()
1338 caam_req->edesc = edesc; in aead_encrypt()
1339 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in aead_encrypt()
1340 if (ret != -EINPROGRESS && in aead_encrypt()
1341 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in aead_encrypt()
1342 aead_unmap(ctx->dev, edesc, req); in aead_encrypt()
1362 caam_req->flc = &ctx->flc[DECRYPT]; in aead_decrypt()
1363 caam_req->flc_dma = ctx->flc_dma[DECRYPT]; in aead_decrypt()
1364 caam_req->cbk = aead_decrypt_done; in aead_decrypt()
1365 caam_req->ctx = &req->base; in aead_decrypt()
1366 caam_req->edesc = edesc; in aead_decrypt()
1367 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in aead_decrypt()
1368 if (ret != -EINPROGRESS && in aead_decrypt()
1369 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in aead_decrypt()
1370 aead_unmap(ctx->dev, edesc, req); in aead_decrypt()
1379 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req); in ipsec_gcm_encrypt()
1384 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req); in ipsec_gcm_decrypt()
1394 struct skcipher_edesc *edesc = req_ctx->edesc; in skcipher_encrypt_done()
1398 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in skcipher_encrypt_done()
1401 ecode = caam_qi2_strstatus(ctx->dev, status); in skcipher_encrypt_done()
1404 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, in skcipher_encrypt_done()
1405 edesc->src_nents > 1 ? 100 : ivsize, 1); in skcipher_encrypt_done()
1407 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, in skcipher_encrypt_done()
1408 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); in skcipher_encrypt_done()
1410 skcipher_unmap(ctx->dev, edesc, req); in skcipher_encrypt_done()
1413 * The crypto API expects us to set the IV (req->iv) to the last in skcipher_encrypt_done()
1418 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, in skcipher_encrypt_done()
1432 struct skcipher_edesc *edesc = req_ctx->edesc; in skcipher_decrypt_done()
1436 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in skcipher_decrypt_done()
1439 ecode = caam_qi2_strstatus(ctx->dev, status); in skcipher_decrypt_done()
1442 DUMP_PREFIX_ADDRESS, 16, 4, req->iv, in skcipher_decrypt_done()
1443 edesc->src_nents > 1 ? 100 : ivsize, 1); in skcipher_decrypt_done()
1445 DUMP_PREFIX_ADDRESS, 16, 4, req->dst, in skcipher_decrypt_done()
1446 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); in skcipher_decrypt_done()
1448 skcipher_unmap(ctx->dev, edesc, req); in skcipher_decrypt_done()
1451 * The crypto API expects us to set the IV (req->iv) to the last in skcipher_decrypt_done()
1456 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, in skcipher_decrypt_done()
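The truncated memcpy() calls above implement the crypto API's IV contract: on completion req->iv must hold the chaining value (the last ciphertext block for CBC, the updated counter for CTR). The output IV is DMA'd right behind the S/G table, so the copy is simply (assumed completion of the truncated call):

	memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize);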
1468 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); in xts_skcipher_ivsize()
1477 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); in skcipher_encrypt()
1485 if (!req->cryptlen && !ctx->fallback) in skcipher_encrypt()
1488 if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || in skcipher_encrypt()
1489 ctx->xts_key_fallback)) { in skcipher_encrypt()
1490 skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); in skcipher_encrypt()
1491 skcipher_request_set_callback(&caam_req->fallback_req, in skcipher_encrypt()
1492 req->base.flags, in skcipher_encrypt()
1493 req->base.complete, in skcipher_encrypt()
1494 req->base.data); in skcipher_encrypt()
1495 skcipher_request_set_crypt(&caam_req->fallback_req, req->src, in skcipher_encrypt()
1496 req->dst, req->cryptlen, req->iv); in skcipher_encrypt()
1498 return crypto_skcipher_encrypt(&caam_req->fallback_req); in skcipher_encrypt()
1506 caam_req->flc = &ctx->flc[ENCRYPT]; in skcipher_encrypt()
1507 caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; in skcipher_encrypt()
1508 caam_req->cbk = skcipher_encrypt_done; in skcipher_encrypt()
1509 caam_req->ctx = &req->base; in skcipher_encrypt()
1510 caam_req->edesc = edesc; in skcipher_encrypt()
1511 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in skcipher_encrypt()
1512 if (ret != -EINPROGRESS && in skcipher_encrypt()
1513 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in skcipher_encrypt()
1514 skcipher_unmap(ctx->dev, edesc, req); in skcipher_encrypt()
1527 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); in skcipher_decrypt()
1535 if (!req->cryptlen && !ctx->fallback) in skcipher_decrypt()
1538 if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || in skcipher_decrypt()
1539 ctx->xts_key_fallback)) { in skcipher_decrypt()
1540 skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); in skcipher_decrypt()
1541 skcipher_request_set_callback(&caam_req->fallback_req, in skcipher_decrypt()
1542 req->base.flags, in skcipher_decrypt()
1543 req->base.complete, in skcipher_decrypt()
1544 req->base.data); in skcipher_decrypt()
1545 skcipher_request_set_crypt(&caam_req->fallback_req, req->src, in skcipher_decrypt()
1546 req->dst, req->cryptlen, req->iv); in skcipher_decrypt()
1548 return crypto_skcipher_decrypt(&caam_req->fallback_req); in skcipher_decrypt()
1556 caam_req->flc = &ctx->flc[DECRYPT]; in skcipher_decrypt()
1557 caam_req->flc_dma = ctx->flc_dma[DECRYPT]; in skcipher_decrypt()
1558 caam_req->cbk = skcipher_decrypt_done; in skcipher_decrypt()
1559 caam_req->ctx = &req->base; in skcipher_decrypt()
1560 caam_req->edesc = edesc; in skcipher_decrypt()
1561 ret = dpaa2_caam_enqueue(ctx->dev, caam_req); in skcipher_decrypt()
1562 if (ret != -EINPROGRESS && in skcipher_decrypt()
1563 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { in skcipher_decrypt()
1564 skcipher_unmap(ctx->dev, edesc, req); in skcipher_decrypt()
1578 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; in caam_cra_init()
1579 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; in caam_cra_init()
1581 ctx->dev = caam->dev; in caam_cra_init()
1582 ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; in caam_cra_init()
1584 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, in caam_cra_init()
1586 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); in caam_cra_init()
1587 if (dma_mapping_error(ctx->dev, dma_addr)) { in caam_cra_init()
1588 dev_err(ctx->dev, "unable to map key, shared descriptors\n"); in caam_cra_init()
1589 return -ENOMEM; in caam_cra_init()
1593 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); in caam_cra_init()
1594 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); in caam_cra_init()
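The pointer arithmetic above assumes a single DMA mapping covering the per-direction flow contexts followed by the key material (layout sketch):

	/*
	 * dma_addr                                -> ctx->flc[ENCRYPT]
	 * dma_addr + sizeof(ctx->flc[0])          -> ctx->flc[DECRYPT]
	 * dma_addr + NUM_OP * sizeof(ctx->flc[0]) -> ctx->key
	 */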
1605 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; in caam_cra_init_skcipher()
1609 const char *tfm_name = crypto_tfm_alg_name(&tfm->base); in caam_cra_init_skcipher()
1615 dev_err(caam_alg->caam.dev, in caam_cra_init_skcipher()
1621 ctx->fallback = fallback; in caam_cra_init_skcipher()
1628 ret = caam_cra_init(ctx, &caam_alg->caam, false); in caam_cra_init_skcipher()
1629 if (ret && ctx->fallback) in caam_cra_init_skcipher()
1630 crypto_free_skcipher(ctx->fallback); in caam_cra_init_skcipher()
1642 return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam, in caam_cra_init_aead()
1643 !caam_alg->caam.nodkp); in caam_cra_init_aead()
1648 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], in caam_exit_common()
1649 offsetof(struct caam_ctx, flc_dma), ctx->dir, in caam_exit_common()
1657 if (ctx->fallback) in caam_cra_exit()
1658 crypto_free_skcipher(ctx->fallback); in caam_cra_exit()
1672 .cra_driver_name = "cbc-aes-caam-qi2",
1688 .cra_driver_name = "cbc-3des-caam-qi2",
1704 .cra_driver_name = "cbc-des-caam-qi2",
1720 .cra_driver_name = "ctr-aes-caam-qi2",
1738 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1761 .cra_driver_name = "xts-aes-caam-qi2",
1778 .cra_driver_name = "chacha20-caam-qi2",
1797 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1816 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1836 .cra_driver_name = "gcm-aes-caam-qi2",
1851 /* single-pass ipsec_esp descriptor */
1856 .cra_driver_name = "authenc-hmac-md5-"
1857 "cbc-aes-caam-qi2",
1878 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1879 "cbc-aes-caam-qi2",
1900 .cra_driver_name = "authenc-hmac-sha1-"
1901 "cbc-aes-caam-qi2",
1922 .cra_driver_name = "echainiv-authenc-"
1923 "hmac-sha1-cbc-aes-caam-qi2",
1944 .cra_driver_name = "authenc-hmac-sha224-"
1945 "cbc-aes-caam-qi2",
1966 .cra_driver_name = "echainiv-authenc-"
1967 "hmac-sha224-cbc-aes-caam-qi2",
1988 .cra_driver_name = "authenc-hmac-sha256-"
1989 "cbc-aes-caam-qi2",
2010 .cra_driver_name = "echainiv-authenc-"
2011 "hmac-sha256-cbc-aes-"
2012 "caam-qi2",
2033 .cra_driver_name = "authenc-hmac-sha384-"
2034 "cbc-aes-caam-qi2",
2055 .cra_driver_name = "echainiv-authenc-"
2056 "hmac-sha384-cbc-aes-"
2057 "caam-qi2",
2078 .cra_driver_name = "authenc-hmac-sha512-"
2079 "cbc-aes-caam-qi2",
2100 .cra_driver_name = "echainiv-authenc-"
2101 "hmac-sha512-cbc-aes-"
2102 "caam-qi2",
2123 .cra_driver_name = "authenc-hmac-md5-"
2124 "cbc-des3_ede-caam-qi2",
2145 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2146 "cbc-des3_ede-caam-qi2",
2168 .cra_driver_name = "authenc-hmac-sha1-"
2169 "cbc-des3_ede-caam-qi2",
2190 .cra_driver_name = "echainiv-authenc-"
2191 "hmac-sha1-"
2192 "cbc-des3_ede-caam-qi2",
2214 .cra_driver_name = "authenc-hmac-sha224-"
2215 "cbc-des3_ede-caam-qi2",
2236 .cra_driver_name = "echainiv-authenc-"
2237 "hmac-sha224-"
2238 "cbc-des3_ede-caam-qi2",
2260 .cra_driver_name = "authenc-hmac-sha256-"
2261 "cbc-des3_ede-caam-qi2",
2282 .cra_driver_name = "echainiv-authenc-"
2283 "hmac-sha256-"
2284 "cbc-des3_ede-caam-qi2",
2306 .cra_driver_name = "authenc-hmac-sha384-"
2307 "cbc-des3_ede-caam-qi2",
2328 .cra_driver_name = "echainiv-authenc-"
2329 "hmac-sha384-"
2330 "cbc-des3_ede-caam-qi2",
2352 .cra_driver_name = "authenc-hmac-sha512-"
2353 "cbc-des3_ede-caam-qi2",
2374 .cra_driver_name = "echainiv-authenc-"
2375 "hmac-sha512-"
2376 "cbc-des3_ede-caam-qi2",
2397 .cra_driver_name = "authenc-hmac-md5-"
2398 "cbc-des-caam-qi2",
2419 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2420 "cbc-des-caam-qi2",
2441 .cra_driver_name = "authenc-hmac-sha1-"
2442 "cbc-des-caam-qi2",
2463 .cra_driver_name = "echainiv-authenc-"
2464 "hmac-sha1-cbc-des-caam-qi2",
2485 .cra_driver_name = "authenc-hmac-sha224-"
2486 "cbc-des-caam-qi2",
2507 .cra_driver_name = "echainiv-authenc-"
2508 "hmac-sha224-cbc-des-"
2509 "caam-qi2",
2530 .cra_driver_name = "authenc-hmac-sha256-"
2531 "cbc-des-caam-qi2",
2552 .cra_driver_name = "echainiv-authenc-"
2553 "hmac-sha256-cbc-des-"
2554 "caam-qi2",
2575 .cra_driver_name = "authenc-hmac-sha384-"
2576 "cbc-des-caam-qi2",
2597 .cra_driver_name = "echainiv-authenc-"
2598 "hmac-sha384-cbc-des-"
2599 "caam-qi2",
2620 .cra_driver_name = "authenc-hmac-sha512-"
2621 "cbc-des-caam-qi2",
2642 .cra_driver_name = "echainiv-authenc-"
2643 "hmac-sha512-cbc-des-"
2644 "caam-qi2",
2666 .cra_driver_name = "authenc-hmac-md5-"
2667 "rfc3686-ctr-aes-caam-qi2",
2690 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2691 "rfc3686-ctr-aes-caam-qi2",
2715 .cra_driver_name = "authenc-hmac-sha1-"
2716 "rfc3686-ctr-aes-caam-qi2",
2739 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2740 "rfc3686-ctr-aes-caam-qi2",
2764 .cra_driver_name = "authenc-hmac-sha224-"
2765 "rfc3686-ctr-aes-caam-qi2",
2788 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2789 "rfc3686-ctr-aes-caam-qi2",
2813 .cra_driver_name = "authenc-hmac-sha256-"
2814 "rfc3686-ctr-aes-caam-qi2",
2837 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2838 "rfc3686-ctr-aes-caam-qi2",
2862 .cra_driver_name = "authenc-hmac-sha384-"
2863 "rfc3686-ctr-aes-caam-qi2",
2886 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2887 "rfc3686-ctr-aes-caam-qi2",
2910 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2911 "caam-qi2",
2933 .cra_driver_name = "rfc7539esp-chacha20-"
2934 "poly1305-caam-qi2",
2957 .cra_driver_name = "authenc-hmac-sha512-"
2958 "rfc3686-ctr-aes-caam-qi2",
2981 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2982 "rfc3686-ctr-aes-caam-qi2",
3005 struct skcipher_alg *alg = &t_alg->skcipher; in caam_skcipher_alg_init()
3007 alg->base.cra_module = THIS_MODULE; in caam_skcipher_alg_init()
3008 alg->base.cra_priority = CAAM_CRA_PRIORITY; in caam_skcipher_alg_init()
3009 alg->base.cra_ctxsize = sizeof(struct caam_ctx); in caam_skcipher_alg_init()
3010 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | in caam_skcipher_alg_init()
3013 alg->init = caam_cra_init_skcipher; in caam_skcipher_alg_init()
3014 alg->exit = caam_cra_exit; in caam_skcipher_alg_init()
3019 struct aead_alg *alg = &t_alg->aead; in caam_aead_alg_init()
3021 alg->base.cra_module = THIS_MODULE; in caam_aead_alg_init()
3022 alg->base.cra_priority = CAAM_CRA_PRIORITY; in caam_aead_alg_init()
3023 alg->base.cra_ctxsize = sizeof(struct caam_ctx); in caam_aead_alg_init()
3024 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | in caam_aead_alg_init()
3027 alg->init = caam_cra_init_aead; in caam_aead_alg_init()
3028 alg->exit = caam_cra_exit_aead; in caam_aead_alg_init()
3049 * struct caam_hash_ctx - ahash per-session context
3095 int buflen = state->buflen; in buf_map_to_qm_sg()
3100 state->buf_dma = dma_map_single(dev, state->buf, buflen, in buf_map_to_qm_sg()
3102 if (dma_mapping_error(dev, state->buf_dma)) { in buf_map_to_qm_sg()
3104 state->buf_dma = 0; in buf_map_to_qm_sg()
3105 return -ENOMEM; in buf_map_to_qm_sg()
3108 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0); in buf_map_to_qm_sg()
3113 /* Map state->caam_ctx, and add it to link table */
3118 state->ctx_dma_len = ctx_len; in ctx_map_to_qm_sg()
3119 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); in ctx_map_to_qm_sg()
3120 if (dma_mapping_error(dev, state->ctx_dma)) { in ctx_map_to_qm_sg()
3122 state->ctx_dma = 0; in ctx_map_to_qm_sg()
3123 return -ENOMEM; in ctx_map_to_qm_sg()
3126 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0); in ctx_map_to_qm_sg()
3135 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); in ahash_set_sh_desc()
3139 /* ahash_update shared descriptor */ in ahash_set_sh_desc()
3140 flc = &ctx->flc[UPDATE]; in ahash_set_sh_desc()
3141 desc = flc->sh_desc; in ahash_set_sh_desc()
3142 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, in ahash_set_sh_desc()
3143 ctx->ctx_len, true, priv->sec_attr.era); in ahash_set_sh_desc()
3144 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3145 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE], in ahash_set_sh_desc()
3151 /* ahash_update_first shared descriptor */ in ahash_set_sh_desc()
3152 flc = &ctx->flc[UPDATE_FIRST]; in ahash_set_sh_desc()
3153 desc = flc->sh_desc; in ahash_set_sh_desc()
3154 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, in ahash_set_sh_desc()
3155 ctx->ctx_len, false, priv->sec_attr.era); in ahash_set_sh_desc()
3156 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3157 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST], in ahash_set_sh_desc()
3163 /* ahash_final shared descriptor */ in ahash_set_sh_desc()
3164 flc = &ctx->flc[FINALIZE]; in ahash_set_sh_desc()
3165 desc = flc->sh_desc; in ahash_set_sh_desc()
3166 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, in ahash_set_sh_desc()
3167 ctx->ctx_len, true, priv->sec_attr.era); in ahash_set_sh_desc()
3168 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3169 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE], in ahash_set_sh_desc()
3175 /* ahash_digest shared descriptor */ in ahash_set_sh_desc()
3176 flc = &ctx->flc[DIGEST]; in ahash_set_sh_desc()
3177 desc = flc->sh_desc; in ahash_set_sh_desc()
3178 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, in ahash_set_sh_desc()
3179 ctx->ctx_len, false, priv->sec_attr.era); in ahash_set_sh_desc()
3180 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in ahash_set_sh_desc()
3181 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST], in ahash_set_sh_desc()
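The four shared descriptors built above differ only in the hardware operation state, the output length and whether a running context is imported; summarised from the cnstr_shdsc_ahash() calls:

	/*
	 * UPDATE       - OP_ALG_AS_UPDATE,    imports ctx, writes ctx_len
	 * UPDATE_FIRST - OP_ALG_AS_INIT,      no import,   writes ctx_len
	 * FINALIZE     - OP_ALG_AS_FINALIZE,  imports ctx, writes digestsize
	 * DIGEST       - OP_ALG_AS_INITFINAL, no import,   writes digestsize
	 */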
3200 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); in split_key_sh_done()
3202 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; in split_key_sh_done()
3203 complete(&res->completion); in split_key_sh_done()
3216 int ret = -ENOMEM; in hash_digest_key()
3221 return -ENOMEM; in hash_digest_key()
3223 in_fle = &req_ctx->fd_flt[1]; in hash_digest_key()
3224 out_fle = &req_ctx->fd_flt[0]; in hash_digest_key()
3230 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL); in hash_digest_key()
3231 if (dma_mapping_error(ctx->dev, key_dma)) { in hash_digest_key()
3232 dev_err(ctx->dev, "unable to map key memory\n"); in hash_digest_key()
3236 desc = flc->sh_desc; in hash_digest_key()
3241 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | in hash_digest_key()
3248 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ in hash_digest_key()
3249 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) + in hash_digest_key()
3251 if (dma_mapping_error(ctx->dev, flc_dma)) { in hash_digest_key()
3252 dev_err(ctx->dev, "unable to map shared descriptor\n"); in hash_digest_key()
3272 result.dev = ctx->dev; in hash_digest_key()
3274 req_ctx->flc = flc; in hash_digest_key()
3275 req_ctx->flc_dma = flc_dma; in hash_digest_key()
3276 req_ctx->cbk = split_key_sh_done; in hash_digest_key()
3277 req_ctx->ctx = &result; in hash_digest_key()
3279 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in hash_digest_key()
3280 if (ret == -EINPROGRESS) { in hash_digest_key()
3289 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), in hash_digest_key()
3292 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL); in hash_digest_key()
3307 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); in ahash_setkey()
3312 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); in ahash_setkey()
3317 return -ENOMEM; in ahash_setkey()
3324 ctx->adata.keylen = keylen; in ahash_setkey()
3325 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & in ahash_setkey()
3327 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) in ahash_setkey()
3330 ctx->adata.key_virt = key; in ahash_setkey()
3331 ctx->adata.key_inline = true; in ahash_setkey()
3339 if (keylen > ctx->adata.keylen_pad) { in ahash_setkey()
3340 memcpy(ctx->key, key, keylen); in ahash_setkey()
3341 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma, in ahash_setkey()
3342 ctx->adata.keylen_pad, in ahash_setkey()
3351 return -EINVAL; in ahash_setkey()
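Condensed sketch of the keying rule behind ahash_setkey() (allocation and error paths omitted, local names such as hashed_key assumed): HMAC keys longer than the block size are first hashed down via the DIGEST descriptor, then the possibly shortened key is split.

	if (keylen > blocksize) {
		/* run the key through hash_digest_key(): keylen shrinks to
		 * the digest size and key then points at the hashed copy */
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			return ret;
		key = hashed_key;
	}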
3359 if (edesc->src_nents) in ahash_unmap()
3360 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
3362 if (edesc->qm_sg_bytes) in ahash_unmap()
3363 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, in ahash_unmap()
3366 if (state->buf_dma) { in ahash_unmap()
3367 dma_unmap_single(dev, state->buf_dma, state->buflen, in ahash_unmap()
3369 state->buf_dma = 0; in ahash_unmap()
3379 if (state->ctx_dma) { in ahash_unmap_ctx()
3380 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); in ahash_unmap_ctx()
3381 state->ctx_dma = 0; in ahash_unmap_ctx()
3392 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done()
3397 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done()
3400 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done()
3402 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_done()
3403 memcpy(req->result, state->caam_ctx, digestsize); in ahash_done()
3407 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done()
3408 ctx->ctx_len, 1); in ahash_done()
3410 req->base.complete(&req->base, ecode); in ahash_done()
3419 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done_bi()
3423 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done_bi()
3426 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done_bi()
3428 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_done_bi()
3431 scatterwalk_map_and_copy(state->buf, req->src, in ahash_done_bi()
3432 req->nbytes - state->next_buflen, in ahash_done_bi()
3433 state->next_buflen, 0); in ahash_done_bi()
3434 state->buflen = state->next_buflen; in ahash_done_bi()
3437 DUMP_PREFIX_ADDRESS, 16, 4, state->buf, in ahash_done_bi()
3438 state->buflen, 1); in ahash_done_bi()
3441 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_bi()
3442 ctx->ctx_len, 1); in ahash_done_bi()
3443 if (req->result) in ahash_done_bi()
3445 DUMP_PREFIX_ADDRESS, 16, 4, req->result, in ahash_done_bi()
3448 req->base.complete(&req->base, ecode); in ahash_done_bi()
3457 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done_ctx_src()
3462 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done_ctx_src()
3465 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done_ctx_src()
3467 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_done_ctx_src()
3468 memcpy(req->result, state->caam_ctx, digestsize); in ahash_done_ctx_src()
3472 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_ctx_src()
3473 ctx->ctx_len, 1); in ahash_done_ctx_src()
3475 req->base.complete(&req->base, ecode); in ahash_done_ctx_src()
3484 struct ahash_edesc *edesc = state->caam_req.edesc; in ahash_done_ctx_dst()
3488 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); in ahash_done_ctx_dst()
3491 ecode = caam_qi2_strstatus(ctx->dev, status); in ahash_done_ctx_dst()
3493 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_done_ctx_dst()
3496 scatterwalk_map_and_copy(state->buf, req->src, in ahash_done_ctx_dst()
3497 req->nbytes - state->next_buflen, in ahash_done_ctx_dst()
3498 state->next_buflen, 0); in ahash_done_ctx_dst()
3499 state->buflen = state->next_buflen; in ahash_done_ctx_dst()
3502 DUMP_PREFIX_ADDRESS, 16, 4, state->buf, in ahash_done_ctx_dst()
3503 state->buflen, 1); in ahash_done_ctx_dst()
3506 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, in ahash_done_ctx_dst()
3507 ctx->ctx_len, 1); in ahash_done_ctx_dst()
3508 if (req->result) in ahash_done_ctx_dst()
3510 DUMP_PREFIX_ADDRESS, 16, 4, req->result, in ahash_done_ctx_dst()
3513 req->base.complete(&req->base, ecode); in ahash_done_ctx_dst()
3521 struct caam_request *req_ctx = &state->caam_req; in ahash_update_ctx()
3522 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_update_ctx()
3523 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_update_ctx()
3524 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_update_ctx()
3526 u8 *buf = state->buf; in ahash_update_ctx()
3527 int *buflen = &state->buflen; in ahash_update_ctx()
3528 int *next_buflen = &state->next_buflen; in ahash_update_ctx()
3529 int in_len = *buflen + req->nbytes, to_hash; in ahash_update_ctx()
3534 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); in ahash_update_ctx()
3535 to_hash = in_len - *next_buflen; in ahash_update_ctx()
3539 int src_len = req->nbytes - *next_buflen; in ahash_update_ctx()
3541 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_ctx()
3543 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_update_ctx()
3548 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_update_ctx()
3551 dev_err(ctx->dev, "unable to DMA map source\n"); in ahash_update_ctx()
3552 return -ENOMEM; in ahash_update_ctx()
3561 dma_unmap_sg(ctx->dev, req->src, src_nents, in ahash_update_ctx()
3563 return -ENOMEM; in ahash_update_ctx()
3566 edesc->src_nents = src_nents; in ahash_update_ctx()
3570 sg_table = &edesc->sgt[0]; in ahash_update_ctx()
3572 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, in ahash_update_ctx()
3577 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); in ahash_update_ctx()
3582 sg_to_qm_sg_last(req->src, src_len, in ahash_update_ctx()
3585 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, in ahash_update_ctx()
3589 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_update_ctx()
3591 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_update_ctx()
3592 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_update_ctx()
3593 ret = -ENOMEM; in ahash_update_ctx()
3596 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_update_ctx()
3598 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_update_ctx()
3601 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_update_ctx()
3602 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash); in ahash_update_ctx()
3604 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_update_ctx()
3605 dpaa2_fl_set_len(out_fle, ctx->ctx_len); in ahash_update_ctx()
3607 req_ctx->flc = &ctx->flc[UPDATE]; in ahash_update_ctx()
3608 req_ctx->flc_dma = ctx->flc_dma[UPDATE]; in ahash_update_ctx()
3609 req_ctx->cbk = ahash_done_bi; in ahash_update_ctx()
3610 req_ctx->ctx = &req->base; in ahash_update_ctx()
3611 req_ctx->edesc = edesc; in ahash_update_ctx()
3613 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_update_ctx()
3614 if (ret != -EINPROGRESS && in ahash_update_ctx()
3615 !(ret == -EBUSY && in ahash_update_ctx()
3616 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_update_ctx()
3619 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, in ahash_update_ctx()
3620 req->nbytes, 0); in ahash_update_ctx()
3630 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_update_ctx()
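Worked example of the partial-block bookkeeping used by the update paths (values assumed: 64-byte block size, 10 bytes already buffered, a 200-byte update):

	int in_len = *buflen + req->nbytes;	/* 10 + 200 = 210 */

	/* bytes that do not fill a whole block are buffered for later ... */
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
						/* 210 % 64 = 18 */
	/* ... and only complete blocks are sent to the engine now */
	to_hash = in_len - *next_buflen;	/* 210 - 18 = 192 */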
3640 struct caam_request *req_ctx = &state->caam_req; in ahash_final_ctx()
3641 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_final_ctx()
3642 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_final_ctx()
3643 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_final_ctx()
3645 int buflen = state->buflen; in ahash_final_ctx()
3655 return -ENOMEM; in ahash_final_ctx()
3658 sg_table = &edesc->sgt[0]; in ahash_final_ctx()
3660 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, in ahash_final_ctx()
3665 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); in ahash_final_ctx()
3671 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, in ahash_final_ctx()
3673 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_final_ctx()
3674 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_final_ctx()
3675 ret = -ENOMEM; in ahash_final_ctx()
3678 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_final_ctx()
3680 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_final_ctx()
3683 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_final_ctx()
3684 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); in ahash_final_ctx()
3686 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_final_ctx()
3689 req_ctx->flc = &ctx->flc[FINALIZE]; in ahash_final_ctx()
3690 req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; in ahash_final_ctx()
3691 req_ctx->cbk = ahash_done_ctx_src; in ahash_final_ctx()
3692 req_ctx->ctx = &req->base; in ahash_final_ctx()
3693 req_ctx->edesc = edesc; in ahash_final_ctx()
3695 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_final_ctx()
3696 if (ret == -EINPROGRESS || in ahash_final_ctx()
3697 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_final_ctx()
3701 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_final_ctx()
3711 struct caam_request *req_ctx = &state->caam_req; in ahash_finup_ctx()
3712 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_finup_ctx()
3713 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_finup_ctx()
3714 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_finup_ctx()
3716 int buflen = state->buflen; in ahash_finup_ctx()
3724 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_finup_ctx()
3726 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_finup_ctx()
3731 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_finup_ctx()
3734 dev_err(ctx->dev, "unable to DMA map source\n"); in ahash_finup_ctx()
3735 return -ENOMEM; in ahash_finup_ctx()
3744 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); in ahash_finup_ctx()
3745 return -ENOMEM; in ahash_finup_ctx()
3748 edesc->src_nents = src_nents; in ahash_finup_ctx()
3752 sg_table = &edesc->sgt[0]; in ahash_finup_ctx()
3754 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, in ahash_finup_ctx()
3759 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); in ahash_finup_ctx()
3763 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); in ahash_finup_ctx()
3765 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, in ahash_finup_ctx()
3767 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_finup_ctx()
3768 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_finup_ctx()
3769 ret = -ENOMEM; in ahash_finup_ctx()
3772 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_finup_ctx()
3774 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_finup_ctx()
3777 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_finup_ctx()
3778 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); in ahash_finup_ctx()
3780 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_finup_ctx()
3783 req_ctx->flc = &ctx->flc[FINALIZE]; in ahash_finup_ctx()
3784 req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; in ahash_finup_ctx()
3785 req_ctx->cbk = ahash_done_ctx_src; in ahash_finup_ctx()
3786 req_ctx->ctx = &req->base; in ahash_finup_ctx()
3787 req_ctx->edesc = edesc; in ahash_finup_ctx()
3789 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_finup_ctx()
3790 if (ret == -EINPROGRESS || in ahash_finup_ctx()
3791 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_finup_ctx()
3795 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); in ahash_finup_ctx()
3805 struct caam_request *req_ctx = &state->caam_req; in ahash_digest()
3806 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_digest()
3807 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_digest()
3808 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_digest()
3813 int ret = -ENOMEM; in ahash_digest()
3815 state->buf_dma = 0; in ahash_digest()
3817 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_digest()
3819 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_digest()
3824 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_digest()
3827 dev_err(ctx->dev, "unable to map source for DMA\n"); in ahash_digest()
3837 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); in ahash_digest()
3841 edesc->src_nents = src_nents; in ahash_digest()
3842 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_digest()
3846 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; in ahash_digest()
3849 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); in ahash_digest()
3850 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_digest()
3852 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_digest()
3853 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_digest()
3856 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_digest()
3858 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_digest()
3861 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); in ahash_digest()
3864 state->ctx_dma_len = digestsize; in ahash_digest()
3865 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, in ahash_digest()
3867 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_digest()
3868 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_digest()
3869 state->ctx_dma = 0; in ahash_digest()
3874 dpaa2_fl_set_len(in_fle, req->nbytes); in ahash_digest()
3876 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_digest()
3879 req_ctx->flc = &ctx->flc[DIGEST]; in ahash_digest()
3880 req_ctx->flc_dma = ctx->flc_dma[DIGEST]; in ahash_digest()
3881 req_ctx->cbk = ahash_done; in ahash_digest()
3882 req_ctx->ctx = &req->base; in ahash_digest()
3883 req_ctx->edesc = edesc; in ahash_digest()
3884 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_digest()
3885 if (ret == -EINPROGRESS || in ahash_digest()
3886 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_digest()
3890 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_digest()
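/*
 * ahash_digest() is the one-shot case: a single mapped segment feeds the
 * input frame entry directly, while multiple segments are first converted
 * into a QMan S/G table; the DIGEST flow context writes the result straight
 * into state->caam_ctx, mapped DMA_FROM_DEVICE for digestsize bytes.
 */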
3900 struct caam_request *req_ctx = &state->caam_req; in ahash_final_no_ctx()
3901 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_final_no_ctx()
3902 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_final_no_ctx()
3903 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_final_no_ctx()
3905 u8 *buf = state->buf; in ahash_final_no_ctx()
3906 int buflen = state->buflen; in ahash_final_no_ctx()
3909 int ret = -ENOMEM; in ahash_final_no_ctx()
3917 state->buf_dma = dma_map_single(ctx->dev, buf, buflen, in ahash_final_no_ctx()
3919 if (dma_mapping_error(ctx->dev, state->buf_dma)) { in ahash_final_no_ctx()
3920 dev_err(ctx->dev, "unable to map src\n"); in ahash_final_no_ctx()
3925 state->ctx_dma_len = digestsize; in ahash_final_no_ctx()
3926 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, in ahash_final_no_ctx()
3928 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_final_no_ctx()
3929 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_final_no_ctx()
3930 state->ctx_dma = 0; in ahash_final_no_ctx()
3934 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_final_no_ctx()
3944 dpaa2_fl_set_addr(in_fle, state->buf_dma); in ahash_final_no_ctx()
3948 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_final_no_ctx()
3951 req_ctx->flc = &ctx->flc[DIGEST]; in ahash_final_no_ctx()
3952 req_ctx->flc_dma = ctx->flc_dma[DIGEST]; in ahash_final_no_ctx()
3953 req_ctx->cbk = ahash_done; in ahash_final_no_ctx()
3954 req_ctx->ctx = &req->base; in ahash_final_no_ctx()
3955 req_ctx->edesc = edesc; in ahash_final_no_ctx()
3957 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_final_no_ctx()
3958 if (ret == -EINPROGRESS || in ahash_final_no_ctx()
3959 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_final_no_ctx()
3963 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_final_no_ctx()
3973 struct caam_request *req_ctx = &state->caam_req; in ahash_update_no_ctx()
3974 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_update_no_ctx()
3975 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_update_no_ctx()
3976 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_update_no_ctx()
3978 u8 *buf = state->buf; in ahash_update_no_ctx()
3979 int *buflen = &state->buflen; in ahash_update_no_ctx()
3980 int *next_buflen = &state->next_buflen; in ahash_update_no_ctx()
3981 int in_len = *buflen + req->nbytes, to_hash; in ahash_update_no_ctx()
3986 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); in ahash_update_no_ctx()
3987 to_hash = in_len - *next_buflen; in ahash_update_no_ctx()
3991 int src_len = req->nbytes - *next_buflen; in ahash_update_no_ctx()
3993 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_no_ctx()
3995 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_update_no_ctx()
4000 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_update_no_ctx()
4003 dev_err(ctx->dev, "unable to map source for DMA\n"); in ahash_update_no_ctx()
4004 return -ENOMEM; in ahash_update_no_ctx()
4013 dma_unmap_sg(ctx->dev, req->src, src_nents, in ahash_update_no_ctx()
4015 return -ENOMEM; in ahash_update_no_ctx()
4018 edesc->src_nents = src_nents; in ahash_update_no_ctx()
4021 sg_table = &edesc->sgt[0]; in ahash_update_no_ctx()
4023 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); in ahash_update_no_ctx()
4027 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); in ahash_update_no_ctx()
4029 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_update_no_ctx()
4031 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_update_no_ctx()
4032 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_update_no_ctx()
4033 ret = -ENOMEM; in ahash_update_no_ctx()
4036 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_update_no_ctx()
4038 state->ctx_dma_len = ctx->ctx_len; in ahash_update_no_ctx()
4039 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, in ahash_update_no_ctx()
4040 ctx->ctx_len, DMA_FROM_DEVICE); in ahash_update_no_ctx()
4041 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_update_no_ctx()
4042 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_update_no_ctx()
4043 state->ctx_dma = 0; in ahash_update_no_ctx()
4044 ret = -ENOMEM; in ahash_update_no_ctx()
4048 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_update_no_ctx()
4051 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_update_no_ctx()
4054 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_update_no_ctx()
4055 dpaa2_fl_set_len(out_fle, ctx->ctx_len); in ahash_update_no_ctx()
4057 req_ctx->flc = &ctx->flc[UPDATE_FIRST]; in ahash_update_no_ctx()
4058 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; in ahash_update_no_ctx()
4059 req_ctx->cbk = ahash_done_ctx_dst; in ahash_update_no_ctx()
4060 req_ctx->ctx = &req->base; in ahash_update_no_ctx()
4061 req_ctx->edesc = edesc; in ahash_update_no_ctx()
4063 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_update_no_ctx()
4064 if (ret != -EINPROGRESS && in ahash_update_no_ctx()
4065 !(ret == -EBUSY && in ahash_update_no_ctx()
4066 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_update_no_ctx()
4069 state->update = ahash_update_ctx; in ahash_update_no_ctx()
4070 state->finup = ahash_finup_ctx; in ahash_update_no_ctx()
4071 state->final = ahash_final_ctx; in ahash_update_no_ctx()
4073 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, in ahash_update_no_ctx()
4074 req->nbytes, 0); in ahash_update_no_ctx()
4084 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); in ahash_update_no_ctx()
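/*
 * ahash_update_no_ctx() runs once at least one full block can be hashed
 * while no running context exists yet: previously buffered bytes plus the
 * block-aligned part of req->src are chained into one S/G table, the
 * UPDATE_FIRST flow context seeds a fresh running context in
 * state->caam_ctx, and the stage handlers are switched to the *_ctx
 * variants. The unaligned tail (next_buflen bytes) is retained in
 * state->buf for the next call.
 */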
4094 struct caam_request *req_ctx = &state->caam_req; in ahash_finup_no_ctx()
4095 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_finup_no_ctx()
4096 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_finup_no_ctx()
4097 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_finup_no_ctx()
4099 int buflen = state->buflen; in ahash_finup_no_ctx()
4104 int ret = -ENOMEM; in ahash_finup_no_ctx()
4106 src_nents = sg_nents_for_len(req->src, req->nbytes); in ahash_finup_no_ctx()
4108 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_finup_no_ctx()
4113 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_finup_no_ctx()
4116 dev_err(ctx->dev, "unable to map source for DMA\n"); in ahash_finup_no_ctx()
4126 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); in ahash_finup_no_ctx()
4130 edesc->src_nents = src_nents; in ahash_finup_no_ctx()
4132 sg_table = &edesc->sgt[0]; in ahash_finup_no_ctx()
4134 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); in ahash_finup_no_ctx()
4138 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); in ahash_finup_no_ctx()
4140 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, in ahash_finup_no_ctx()
4142 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_finup_no_ctx()
4143 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_finup_no_ctx()
4144 ret = -ENOMEM; in ahash_finup_no_ctx()
4147 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_finup_no_ctx()
4149 state->ctx_dma_len = digestsize; in ahash_finup_no_ctx()
4150 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, in ahash_finup_no_ctx()
4152 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_finup_no_ctx()
4153 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_finup_no_ctx()
4154 state->ctx_dma = 0; in ahash_finup_no_ctx()
4155 ret = -ENOMEM; in ahash_finup_no_ctx()
4159 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_finup_no_ctx()
4162 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_finup_no_ctx()
4163 dpaa2_fl_set_len(in_fle, buflen + req->nbytes); in ahash_finup_no_ctx()
4165 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_finup_no_ctx()
4168 req_ctx->flc = &ctx->flc[DIGEST]; in ahash_finup_no_ctx()
4169 req_ctx->flc_dma = ctx->flc_dma[DIGEST]; in ahash_finup_no_ctx()
4170 req_ctx->cbk = ahash_done; in ahash_finup_no_ctx()
4171 req_ctx->ctx = &req->base; in ahash_finup_no_ctx()
4172 req_ctx->edesc = edesc; in ahash_finup_no_ctx()
4173 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_finup_no_ctx()
4174 if (ret != -EINPROGRESS && in ahash_finup_no_ctx()
4175 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) in ahash_finup_no_ctx()
4180 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); in ahash_finup_no_ctx()
4190 struct caam_request *req_ctx = &state->caam_req; in ahash_update_first()
4191 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; in ahash_update_first()
4192 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; in ahash_update_first()
4193 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in ahash_update_first()
4195 u8 *buf = state->buf; in ahash_update_first()
4196 int *buflen = &state->buflen; in ahash_update_first()
4197 int *next_buflen = &state->next_buflen; in ahash_update_first()
4203 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - in ahash_update_first()
4205 to_hash = req->nbytes - *next_buflen; in ahash_update_first()
4209 int src_len = req->nbytes - *next_buflen; in ahash_update_first()
4211 src_nents = sg_nents_for_len(req->src, src_len); in ahash_update_first()
4213 dev_err(ctx->dev, "Invalid number of src SG.\n"); in ahash_update_first()
4218 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, in ahash_update_first()
4221 dev_err(ctx->dev, "unable to map source for DMA\n"); in ahash_update_first()
4222 return -ENOMEM; in ahash_update_first()
4231 dma_unmap_sg(ctx->dev, req->src, src_nents, in ahash_update_first()
4233 return -ENOMEM; in ahash_update_first()
4236 edesc->src_nents = src_nents; in ahash_update_first()
4237 sg_table = &edesc->sgt[0]; in ahash_update_first()
4239 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); in ahash_update_first()
4246 sg_to_qm_sg_last(req->src, src_len, sg_table, 0); in ahash_update_first()
4249 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, in ahash_update_first()
4252 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { in ahash_update_first()
4253 dev_err(ctx->dev, "unable to map S/G table\n"); in ahash_update_first()
4254 ret = -ENOMEM; in ahash_update_first()
4257 edesc->qm_sg_bytes = qm_sg_bytes; in ahash_update_first()
4259 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); in ahash_update_first()
4262 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); in ahash_update_first()
4265 state->ctx_dma_len = ctx->ctx_len; in ahash_update_first()
4266 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, in ahash_update_first()
4267 ctx->ctx_len, DMA_FROM_DEVICE); in ahash_update_first()
4268 if (dma_mapping_error(ctx->dev, state->ctx_dma)) { in ahash_update_first()
4269 dev_err(ctx->dev, "unable to map ctx\n"); in ahash_update_first()
4270 state->ctx_dma = 0; in ahash_update_first()
4271 ret = -ENOMEM; in ahash_update_first()
4276 dpaa2_fl_set_addr(out_fle, state->ctx_dma); in ahash_update_first()
4277 dpaa2_fl_set_len(out_fle, ctx->ctx_len); in ahash_update_first()
4279 req_ctx->flc = &ctx->flc[UPDATE_FIRST]; in ahash_update_first()
4280 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; in ahash_update_first()
4281 req_ctx->cbk = ahash_done_ctx_dst; in ahash_update_first()
4282 req_ctx->ctx = &req->base; in ahash_update_first()
4283 req_ctx->edesc = edesc; in ahash_update_first()
4285 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); in ahash_update_first()
4286 if (ret != -EINPROGRESS && in ahash_update_first()
4287 !(ret == -EBUSY && req->base.flags & in ahash_update_first()
4291 state->update = ahash_update_ctx; in ahash_update_first()
4292 state->finup = ahash_finup_ctx; in ahash_update_first()
4293 state->final = ahash_final_ctx; in ahash_update_first()
4295 state->update = ahash_update_no_ctx; in ahash_update_first()
4296 state->finup = ahash_finup_no_ctx; in ahash_update_first()
4297 state->final = ahash_final_no_ctx; in ahash_update_first()
4298 scatterwalk_map_and_copy(buf, req->src, 0, in ahash_update_first()
4299 req->nbytes, 0); in ahash_update_first()
4309 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); in ahash_update_first()
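/*
 * ahash_update_first() handles the very first submission of a stream:
 * full blocks go to the engine through the UPDATE_FIRST flow context and
 * the running context lands in state->caam_ctx (DMA_FROM_DEVICE), after
 * which the *_ctx handlers take over; anything shorter than a block is
 * merely buffered and the *_no_ctx handlers stay in place.
 */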
4323 state->update = ahash_update_first; in ahash_init()
4324 state->finup = ahash_finup_first; in ahash_init()
4325 state->final = ahash_final_no_ctx; in ahash_init()
4327 state->ctx_dma = 0; in ahash_init()
4328 state->ctx_dma_len = 0; in ahash_init()
4329 state->buf_dma = 0; in ahash_init()
4330 state->buflen = 0; in ahash_init()
4331 state->next_buflen = 0; in ahash_init()
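/*
 * ahash_init() touches no hardware: it only resets the software state and
 * installs ahash_update_first/ahash_finup_first/ahash_final_no_ctx as the
 * initial stage handlers.
 */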
4340 return state->update(req); in ahash_update()
4347 return state->finup(req); in ahash_finup()
4354 return state->final(req); in ahash_final()
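/*
 * crypto_ahash_update()/finup()/final() land in the three thin dispatchers
 * above, which simply jump through the per-request state machine. A rough,
 * illustrative caller sketch using the generic (asynchronous) ahash API;
 * cb, sgl, digest and nbytes are caller-supplied placeholders, and
 * completion waiting plus error checks are elided:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, NULL);
 *	ahash_request_set_crypt(req, sgl, digest, nbytes);
 *	crypto_ahash_init(req);		// -> ahash_init()
 *	crypto_ahash_update(req);	// -> ahash_update() -> state->update()
 *	crypto_ahash_final(req);	// -> ahash_final()  -> state->final()
 */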
4361 u8 *buf = state->buf; in ahash_export()
4362 int len = state->buflen; in ahash_export()
4364 memcpy(export->buf, buf, len); in ahash_export()
4365 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); in ahash_export()
4366 export->buflen = len; in ahash_export()
4367 export->update = state->update; in ahash_export()
4368 export->final = state->final; in ahash_export()
4369 export->finup = state->finup; in ahash_export()
4380 memcpy(state->buf, export->buf, export->buflen); in ahash_import()
4381 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); in ahash_import()
4382 state->buflen = export->buflen; in ahash_import()
4383 state->update = export->update; in ahash_import()
4384 state->final = export->final; in ahash_import()
4385 state->finup = export->finup; in ahash_import()
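/*
 * export/import only serialize software state (the partial block buffer,
 * the running CAAM context and the current stage handlers), so a hash can
 * be frozen in one request and resumed in another.
 */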
4404 .driver_name = "sha1-caam-qi2",
4406 .hmac_driver_name = "hmac-sha1-caam-qi2",
4425 .driver_name = "sha224-caam-qi2",
4427 .hmac_driver_name = "hmac-sha224-caam-qi2",
4446 .driver_name = "sha256-caam-qi2",
4448 .hmac_driver_name = "hmac-sha256-caam-qi2",
4467 .driver_name = "sha384-caam-qi2",
4469 .hmac_driver_name = "hmac-sha384-caam-qi2",
4488 .driver_name = "sha512-caam-qi2",
4490 .hmac_driver_name = "hmac-sha512-caam-qi2",
4509 .driver_name = "md5-caam-qi2",
4511 .hmac_driver_name = "hmac-md5-caam-qi2",
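/*
 * Each hash template above yields two registrations: an unkeyed hash
 * ("<alg>-caam-qi2") and its keyed HMAC counterpart ("hmac-<alg>-caam-qi2"),
 * covering sha1/224/256/384/512 and md5.
 */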
4541 struct crypto_alg *base = tfm->__crt_alg; in caam_hash_cra_init()
4559 ctx->dev = caam_hash->dev; in caam_hash_cra_init()
4561 if (alg->setkey) { in caam_hash_cra_init()
4562 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, in caam_hash_cra_init()
4563 ARRAY_SIZE(ctx->key), in caam_hash_cra_init()
4566 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) { in caam_hash_cra_init()
4567 dev_err(ctx->dev, "unable to map key\n"); in caam_hash_cra_init()
4568 return -ENOMEM; in caam_hash_cra_init()
4572 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), in caam_hash_cra_init()
4575 if (dma_mapping_error(ctx->dev, dma_addr)) { in caam_hash_cra_init()
4576 dev_err(ctx->dev, "unable to map shared descriptors\n"); in caam_hash_cra_init()
4577 if (ctx->adata.key_dma) in caam_hash_cra_init()
4578 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, in caam_hash_cra_init()
4579 ARRAY_SIZE(ctx->key), in caam_hash_cra_init()
4582 return -ENOMEM; in caam_hash_cra_init()
4586 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); in caam_hash_cra_init()
4589 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; in caam_hash_cra_init()
4591 ctx->ctx_len = runninglen[(ctx->adata.algtype & in caam_hash_cra_init()
4599 * For keyed hash algorithms shared descriptors in caam_hash_cra_init()
4602 return alg->setkey ? 0 : ahash_set_sh_desc(ahash); in caam_hash_cra_init()
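/*
 * Per-tfm init: the key buffer is DMA-mapped only for keyed (HMAC)
 * algorithms, the whole flc[] array is mapped with a single call and each
 * flc_dma[i] is derived by offset, and shared descriptors are built here
 * for unkeyed algorithms while keyed ones wait for ->setkey().
 */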
4609 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), in caam_hash_cra_exit()
4611 if (ctx->adata.key_dma) in caam_hash_cra_exit()
4612 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, in caam_hash_cra_exit()
4613 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, in caam_hash_cra_exit()
4626 return ERR_PTR(-ENOMEM); in caam_hash_alloc()
4628 t_alg->ahash_alg = template->template_ahash; in caam_hash_alloc()
4629 halg = &t_alg->ahash_alg; in caam_hash_alloc()
4630 alg = &halg->halg.base; in caam_hash_alloc()
4633 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4634 template->hmac_name); in caam_hash_alloc()
4635 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4636 template->hmac_driver_name); in caam_hash_alloc()
4638 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4639 template->name); in caam_hash_alloc()
4640 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", in caam_hash_alloc()
4641 template->driver_name); in caam_hash_alloc()
4642 t_alg->ahash_alg.setkey = NULL; in caam_hash_alloc()
4644 alg->cra_module = THIS_MODULE; in caam_hash_alloc()
4645 alg->cra_init = caam_hash_cra_init; in caam_hash_alloc()
4646 alg->cra_exit = caam_hash_cra_exit; in caam_hash_alloc()
4647 alg->cra_ctxsize = sizeof(struct caam_hash_ctx); in caam_hash_alloc()
4648 alg->cra_priority = CAAM_CRA_PRIORITY; in caam_hash_alloc()
4649 alg->cra_blocksize = template->blocksize; in caam_hash_alloc()
4650 alg->cra_alignmask = 0; in caam_hash_alloc()
4651 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; in caam_hash_alloc()
4653 t_alg->alg_type = template->alg_type; in caam_hash_alloc()
4654 t_alg->dev = dev; in caam_hash_alloc()
4664 napi_schedule_irqoff(&ppriv->napi); in dpaa2_caam_fqdan_cb()
4669 struct device *dev = priv->dev; in dpaa2_dpseci_dpio_setup()
4675 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_setup()
4676 ppriv->priv = priv; in dpaa2_dpseci_dpio_setup()
4677 nctx = &ppriv->nctx; in dpaa2_dpseci_dpio_setup()
4678 nctx->is_cdan = 0; in dpaa2_dpseci_dpio_setup()
4679 nctx->id = ppriv->rsp_fqid; in dpaa2_dpseci_dpio_setup()
4680 nctx->desired_cpu = cpu; in dpaa2_dpseci_dpio_setup()
4681 nctx->cb = dpaa2_caam_fqdan_cb; in dpaa2_dpseci_dpio_setup()
4684 ppriv->dpio = dpaa2_io_service_select(cpu); in dpaa2_dpseci_dpio_setup()
4685 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev); in dpaa2_dpseci_dpio_setup()
4688 nctx->cb = NULL; in dpaa2_dpseci_dpio_setup()
4695 err = -EPROBE_DEFER; in dpaa2_dpseci_dpio_setup()
4699 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, in dpaa2_dpseci_dpio_setup()
4701 if (unlikely(!ppriv->store)) { in dpaa2_dpseci_dpio_setup()
4703 err = -ENOMEM; in dpaa2_dpseci_dpio_setup()
4707 if (++i == priv->num_pairs) in dpaa2_dpseci_dpio_setup()
4715 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_setup()
4716 if (!ppriv->nctx.cb) in dpaa2_dpseci_dpio_setup()
4718 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev); in dpaa2_dpseci_dpio_setup()
4722 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_setup()
4723 if (!ppriv->store) in dpaa2_dpseci_dpio_setup()
4725 dpaa2_io_store_destroy(ppriv->store); in dpaa2_dpseci_dpio_setup()
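/*
 * dpaa2_dpseci_dpio_setup() walks the online CPUs, registers one FQDAN
 * notification context per {Rx FQ, CPU} pair with that CPU's DPIO service
 * and allocates a dequeue store for it; on failure it unwinds in reverse,
 * deregistering contexts and destroying any stores already created. A
 * missing DPIO service is turned into -EPROBE_DEFER.
 */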
4737 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_dpio_free()
4738 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, in dpaa2_dpseci_dpio_free()
4739 priv->dev); in dpaa2_dpseci_dpio_free()
4740 dpaa2_io_store_destroy(ppriv->store); in dpaa2_dpseci_dpio_free()
4742 if (++i == priv->num_pairs) in dpaa2_dpseci_dpio_free()
4750 struct device *dev = priv->dev; in dpaa2_dpseci_bind()
4757 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_bind()
4763 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; in dpaa2_dpseci_bind()
4769 rx_queue_cfg.user_ctx = ppriv->nctx.qman64; in dpaa2_dpseci_bind()
4771 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, in dpaa2_dpseci_bind()
4779 if (++i == priv->num_pairs) in dpaa2_dpseci_bind()
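/*
 * dpaa2_dpseci_bind() points every Rx queue at the DPIO chosen for its CPU
 * and stores the notification context's 64-bit token in user_ctx, so a
 * frame-queue data-available notification can be routed back to the right
 * per-CPU context.
 */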
4788 struct device *dev = priv->dev; in dpaa2_dpseci_congestion_free()
4790 if (!priv->cscn_mem) in dpaa2_dpseci_congestion_free()
4793 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); in dpaa2_dpseci_congestion_free()
4794 kfree(priv->cscn_mem); in dpaa2_dpseci_congestion_free()
4799 struct device *dev = priv->dev; in dpaa2_dpseci_free()
4803 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { in dpaa2_dpseci_free()
4804 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_free()
4810 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_free()
4820 dev_err(priv->dev, "Only Frame List FD format is supported!\n"); in dpaa2_caam_process_fd()
4826 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); in dpaa2_caam_process_fd()
4830 * in FD[ERR] or FD[FRC]. in dpaa2_caam_process_fd()
4833 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt), in dpaa2_caam_process_fd()
4835 req->cbk(req->ctx, dpaa2_fd_get_frc(fd)); in dpaa2_caam_process_fd()
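/*
 * Response path: dpaa2_caam_process_fd() insists on the frame-list FD
 * format, rate-limits FD[ERR] reporting, unmaps the request's fd_flt
 * table and completes the request by passing FD[FRC] (the CAAM status
 * word) to the callback installed at enqueue time.
 */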
4844 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, in dpaa2_caam_pull_fq()
4845 ppriv->store); in dpaa2_caam_pull_fq()
4846 } while (err == -EBUSY); in dpaa2_caam_pull_fq()
4849 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err); in dpaa2_caam_pull_fq()
4860 dq = dpaa2_io_store_next(ppriv->store, &is_last); in dpaa2_caam_store_consume()
4863 dev_dbg(ppriv->priv->dev, in dpaa2_caam_store_consume()
4865 ppriv->rsp_fqid); in dpaa2_caam_store_consume()
4877 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq)); in dpaa2_caam_store_consume()
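/*
 * dpaa2_caam_pull_fq() issues the volatile dequeue command (retrying while
 * the portal is busy) and dpaa2_caam_store_consume() then drains the store,
 * handing each dequeued FD to dpaa2_caam_process_fd() until the "last"
 * marker is seen.
 */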
4891 priv = ppriv->priv; in dpaa2_dpseci_poll()
4901 cleaned > budget - DPAA2_CAAM_STORE_SIZE) in dpaa2_dpseci_poll()
4912 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx); in dpaa2_dpseci_poll()
4914 dev_err(priv->dev, "Notification rearm failed: %d\n", in dpaa2_dpseci_poll()
4925 struct device *dev = priv->dev; in dpaa2_dpseci_congestion_setup()
4932 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) || in dpaa2_dpseci_congestion_setup()
4933 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) in dpaa2_dpseci_congestion_setup()
4936 priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN, in dpaa2_dpseci_congestion_setup()
4938 if (!priv->cscn_mem) in dpaa2_dpseci_congestion_setup()
4939 return -ENOMEM; in dpaa2_dpseci_congestion_setup()
4941 priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN); in dpaa2_dpseci_congestion_setup()
4942 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned, in dpaa2_dpseci_congestion_setup()
4944 if (dma_mapping_error(dev, priv->cscn_dma)) { in dpaa2_dpseci_congestion_setup()
4946 err = -ENOMEM; in dpaa2_dpseci_congestion_setup()
4954 cong_notif_cfg.message_iova = priv->cscn_dma; in dpaa2_dpseci_congestion_setup()
4959 err = dpseci_set_congestion_notification(priv->mc_io, 0, token, in dpaa2_dpseci_congestion_setup()
4969 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); in dpaa2_dpseci_congestion_setup()
4971 kfree(priv->cscn_mem); in dpaa2_dpseci_congestion_setup()
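/*
 * Congestion setup (DPSECI >= 5.1 with DPSECI_OPT_HAS_CG only): a CSCN
 * area is allocated, aligned and DMA-mapped, and the MC firmware is told
 * to write congestion state-change notifications into it; the enqueue
 * path later polls this memory and fails fast with -EBUSY while the
 * congestion group is congested.
 */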
4978 struct device *dev = &ls_dev->dev; in dpaa2_dpseci_setup()
4986 priv->dev = dev; in dpaa2_dpseci_setup()
4987 priv->dpsec_id = ls_dev->obj_desc.id; in dpaa2_dpseci_setup()
4990 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle); in dpaa2_dpseci_setup()
4996 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver, in dpaa2_dpseci_setup()
4997 &priv->minor_ver); in dpaa2_dpseci_setup()
5003 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); in dpaa2_dpseci_setup()
5005 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { in dpaa2_dpseci_setup()
5006 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_setup()
5013 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, in dpaa2_dpseci_setup()
5014 &priv->dpseci_attr); in dpaa2_dpseci_setup()
5020 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle, in dpaa2_dpseci_setup()
5021 &priv->sec_attr); in dpaa2_dpseci_setup()
5027 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle); in dpaa2_dpseci_setup()
5033 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues, in dpaa2_dpseci_setup()
5034 priv->dpseci_attr.num_tx_queues); in dpaa2_dpseci_setup()
5035 if (priv->num_pairs > num_online_cpus()) { in dpaa2_dpseci_setup()
5037 priv->num_pairs - num_online_cpus()); in dpaa2_dpseci_setup()
5038 priv->num_pairs = num_online_cpus(); in dpaa2_dpseci_setup()
5041 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) { in dpaa2_dpseci_setup()
5042 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, in dpaa2_dpseci_setup()
5043 &priv->rx_queue_attr[i]); in dpaa2_dpseci_setup()
5050 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) { in dpaa2_dpseci_setup()
5051 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, in dpaa2_dpseci_setup()
5052 &priv->tx_queue_attr[i]); in dpaa2_dpseci_setup()
5063 j = i % priv->num_pairs; in dpaa2_dpseci_setup()
5065 ppriv = per_cpu_ptr(priv->ppriv, cpu); in dpaa2_dpseci_setup()
5066 ppriv->req_fqid = priv->tx_queue_attr[j].fqid; in dpaa2_dpseci_setup()
5072 if (++i > priv->num_pairs) in dpaa2_dpseci_setup()
5075 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; in dpaa2_dpseci_setup()
5076 ppriv->prio = j; in dpaa2_dpseci_setup()
5079 priv->rx_queue_attr[j].fqid, in dpaa2_dpseci_setup()
5080 priv->tx_queue_attr[j].fqid); in dpaa2_dpseci_setup()
5082 ppriv->net_dev.dev = *dev; in dpaa2_dpseci_setup()
5083 INIT_LIST_HEAD(&ppriv->net_dev.napi_list); in dpaa2_dpseci_setup()
5084 netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, in dpaa2_dpseci_setup()
5093 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_setup()
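/*
 * dpaa2_dpseci_setup() opens the DPSECI object, resets it where the API
 * version allows, reads the DPSECI and SEC attributes, clamps the number
 * of queue pairs to the online CPU count, and then distributes the Tx/Rx
 * FQID pairs round-robin: every online CPU gets a request FQ for enqueue,
 * while only the first num_pairs CPUs also get a response FQ and a NAPI
 * poller for dequeue.
 */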
5100 struct device *dev = priv->dev; in dpaa2_dpseci_enable()
5105 for (i = 0; i < priv->num_pairs; i++) { in dpaa2_dpseci_enable()
5106 ppriv = per_cpu_ptr(priv->ppriv, i); in dpaa2_dpseci_enable()
5107 napi_enable(&ppriv->napi); in dpaa2_dpseci_enable()
5110 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_enable()
5115 struct device *dev = priv->dev; in dpaa2_dpseci_disable()
5120 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle); in dpaa2_dpseci_disable()
5126 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled); in dpaa2_dpseci_disable()
5134 for (i = 0; i < priv->num_pairs; i++) { in dpaa2_dpseci_disable()
5135 ppriv = per_cpu_ptr(priv->ppriv, i); in dpaa2_dpseci_disable()
5136 napi_disable(&ppriv->napi); in dpaa2_dpseci_disable()
5137 netif_napi_del(&ppriv->napi); in dpaa2_dpseci_disable()
5153 * There is no way to get CAAM endianness - there is no direct register in dpaa2_caam_probe()
5155 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this in dpaa2_caam_probe()
5162 dev = &dpseci_dev->dev; in dpaa2_caam_probe()
5166 return -ENOMEM; in dpaa2_caam_probe()
5170 priv->domain = iommu_get_domain_for_dev(dev); in dpaa2_caam_probe()
5176 return -ENOMEM; in dpaa2_caam_probe()
5186 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io); in dpaa2_caam_probe()
5188 if (err == -ENXIO) in dpaa2_caam_probe()
5189 err = -EPROBE_DEFER; in dpaa2_caam_probe()
5196 priv->ppriv = alloc_percpu(*priv->ppriv); in dpaa2_caam_probe()
5197 if (!priv->ppriv) { in dpaa2_caam_probe()
5199 err = -ENOMEM; in dpaa2_caam_probe()
5236 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; in dpaa2_caam_probe()
5239 if (!priv->sec_attr.des_acc_num && in dpaa2_caam_probe()
5245 if (!priv->sec_attr.aes_acc_num && in dpaa2_caam_probe()
5251 !priv->sec_attr.ccha_acc_num) in dpaa2_caam_probe()
5254 t_alg->caam.dev = dev; in dpaa2_caam_probe()
5257 err = crypto_register_skcipher(&t_alg->skcipher); in dpaa2_caam_probe()
5260 t_alg->skcipher.base.cra_driver_name, err); in dpaa2_caam_probe()
5264 t_alg->registered = true; in dpaa2_caam_probe()
5270 u32 c1_alg_sel = t_alg->caam.class1_alg_type & in dpaa2_caam_probe()
5272 u32 c2_alg_sel = t_alg->caam.class2_alg_type & in dpaa2_caam_probe()
5276 if (!priv->sec_attr.des_acc_num && in dpaa2_caam_probe()
5282 if (!priv->sec_attr.aes_acc_num && in dpaa2_caam_probe()
5288 !priv->sec_attr.ccha_acc_num) in dpaa2_caam_probe()
5293 !priv->sec_attr.ptha_acc_num) in dpaa2_caam_probe()
5301 !priv->sec_attr.md_acc_num) in dpaa2_caam_probe()
5304 t_alg->caam.dev = dev; in dpaa2_caam_probe()
5307 err = crypto_register_aead(&t_alg->aead); in dpaa2_caam_probe()
5310 t_alg->aead.base.cra_driver_name, err); in dpaa2_caam_probe()
5314 t_alg->registered = true; in dpaa2_caam_probe()
5327 if (!priv->sec_attr.md_acc_num) in dpaa2_caam_probe()
5339 alg->hmac_driver_name, err); in dpaa2_caam_probe()
5343 err = crypto_register_ahash(&t_alg->ahash_alg); in dpaa2_caam_probe()
5346 t_alg->ahash_alg.halg.base.cra_driver_name, in dpaa2_caam_probe()
5350 list_add_tail(&t_alg->entry, &hash_list); in dpaa2_caam_probe()
5358 alg->driver_name, err); in dpaa2_caam_probe()
5362 err = crypto_register_ahash(&t_alg->ahash_alg); in dpaa2_caam_probe()
5365 t_alg->ahash_alg.halg.base.cra_driver_name, in dpaa2_caam_probe()
5369 list_add_tail(&t_alg->entry, &hash_list); in dpaa2_caam_probe()
5382 free_percpu(priv->ppriv); in dpaa2_caam_probe()
5384 fsl_mc_portal_free(priv->mc_io); in dpaa2_caam_probe()
5397 dev = &ls_dev->dev; in dpaa2_caam_remove()
5405 if (t_alg->registered) in dpaa2_caam_remove()
5406 crypto_unregister_aead(&t_alg->aead); in dpaa2_caam_remove()
5412 if (t_alg->registered) in dpaa2_caam_remove()
5413 crypto_unregister_skcipher(&t_alg->skcipher); in dpaa2_caam_remove()
5420 crypto_unregister_ahash(&t_hash_alg->ahash_alg); in dpaa2_caam_remove()
5421 list_del(&t_hash_alg->entry); in dpaa2_caam_remove()
5429 free_percpu(priv->ppriv); in dpaa2_caam_remove()
5430 fsl_mc_portal_free(priv->mc_io); in dpaa2_caam_remove()
5446 if (priv->cscn_mem) { in dpaa2_caam_enqueue()
5447 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, in dpaa2_caam_enqueue()
5450 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) { in dpaa2_caam_enqueue()
5452 return -EBUSY; in dpaa2_caam_enqueue()
5456 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma); in dpaa2_caam_enqueue()
5458 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), in dpaa2_caam_enqueue()
5460 if (dma_mapping_error(dev, req->fd_flt_dma)) { in dpaa2_caam_enqueue()
5467 dpaa2_fd_set_addr(&fd, req->fd_flt_dma); in dpaa2_caam_enqueue()
5468 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); in dpaa2_caam_enqueue()
5469 dpaa2_fd_set_flc(&fd, req->flc_dma); in dpaa2_caam_enqueue()
5471 ppriv = raw_cpu_ptr(priv->ppriv); in dpaa2_caam_enqueue()
5472 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { in dpaa2_caam_enqueue()
5473 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, in dpaa2_caam_enqueue()
5475 if (err != -EBUSY) in dpaa2_caam_enqueue()
5486 return -EINPROGRESS; in dpaa2_caam_enqueue()
5489 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt), in dpaa2_caam_enqueue()
5491 return -EIO; in dpaa2_caam_enqueue()
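/*
 * dpaa2_caam_enqueue() is the single submission point used by all the
 * algorithm code above: it first checks the CSCN area and backs off with
 * -EBUSY under congestion, DMA-maps the caller's two-entry frame list,
 * builds an FD whose FLC field carries the shared-descriptor DMA address,
 * and retries dpaa2_io_service_enqueue_fq() while the portal is busy
 * (bounded by twice the number of Tx queues). Success is reported as
 * -EINPROGRESS; a persistent -EBUSY or any other error unmaps the frame
 * list and returns -EIO.
 */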