Lines Matching +full:resume +full:- +full:offset
6 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
13 * Some ideas are from the omap-sham.c driver.
32 #include <linux/dma-mapping.h>
42 #include <linux/platform_data/crypto-atmel.h>
43 #include "atmel-sha-regs.h"
44 #include "atmel-authenc.h"
103 unsigned int offset; /* offset in current sg */ member
149 atmel_sha_fn_t resume; member
172 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr) in atmel_sha_reg_name() argument
174 switch (offset) { in atmel_sha_reg_name()
215 snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2); in atmel_sha_reg_name()
236 16u + ((offset - SHA_REG_DIGEST(0)) >> 2)); in atmel_sha_reg_name()
239 (offset - SHA_REG_DIGEST(0)) >> 2); in atmel_sha_reg_name()
246 snprintf(tmp, sz, "0x%02x", offset); in atmel_sha_reg_name()
255 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) in atmel_sha_read() argument
257 u32 value = readl_relaxed(dd->io_base + offset); in atmel_sha_read()
260 if (dd->flags & SHA_FLAGS_DUMP_REG) { in atmel_sha_read()
263 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, in atmel_sha_read()
264 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false)); in atmel_sha_read()
272 u32 offset, u32 value) in atmel_sha_write() argument
275 if (dd->flags & SHA_FLAGS_DUMP_REG) { in atmel_sha_write()
278 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, in atmel_sha_write()
279 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true)); in atmel_sha_write()
283 writel_relaxed(value, dd->io_base + offset); in atmel_sha_write()
288 struct ahash_request *req = dd->req; in atmel_sha_complete()
290 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | in atmel_sha_complete()
294 clk_disable(dd->iclk); in atmel_sha_complete()
296 if ((dd->is_async || dd->force_complete) && req->base.complete) in atmel_sha_complete()
297 req->base.complete(&req->base, err); in atmel_sha_complete()
300 tasklet_schedule(&dd->queue_task); in atmel_sha_complete()
309 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { in atmel_sha_append_sg()
310 count = min(ctx->sg->length - ctx->offset, ctx->total); in atmel_sha_append_sg()
311 count = min(count, ctx->buflen - ctx->bufcnt); in atmel_sha_append_sg()
320 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { in atmel_sha_append_sg()
321 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
328 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, in atmel_sha_append_sg()
329 ctx->offset, count, 0); in atmel_sha_append_sg()
331 ctx->bufcnt += count; in atmel_sha_append_sg()
332 ctx->offset += count; in atmel_sha_append_sg()
333 ctx->total -= count; in atmel_sha_append_sg()
335 if (ctx->offset == ctx->sg->length) { in atmel_sha_append_sg()
336 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
337 if (ctx->sg) in atmel_sha_append_sg()
338 ctx->offset = 0; in atmel_sha_append_sg()
340 ctx->total = 0; in atmel_sha_append_sg()
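
Each pass of atmel_sha_append_sg() above copies the largest chunk allowed by three limits at once: the bytes remaining in the current scatterlist entry, the bytes remaining in the request, and the free room in the context buffer. A minimal standalone restatement of that bounding (the helper name is illustrative, not from the driver):

        /* Illustrative only: the three bounds applied by atmel_sha_append_sg(). */
        static unsigned int bounded_copy_len(unsigned int sg_len, unsigned int sg_off,
                                             unsigned int total,
                                             unsigned int buflen, unsigned int bufcnt)
        {
                unsigned int count = min(sg_len - sg_off, total); /* left in this sg vs. in the request */

                return min(count, buflen - bufcnt);               /* capped by room left in ctx->buffer */
        }
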
351 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or
356 * - if message length < 56 bytes then padlen = 56 - message length
357 * - else padlen = 64 + 56 - message length
360 * - if message length < 112 bytes then padlen = 112 - message length
361 * - else padlen = 128 + 112 - message length
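
As a worked example of the rules above: hashing a 120-byte message with SHA-256 gives index = 120 & 0x3f = 56, so padlen = (64 + 56) - 56 = 64 and the padded length is 120 + 64 + 8 = 192 bytes (three 64-byte blocks); with SHA-512, index = 120 & 0x7f = 120, so padlen = (128 + 112) - 120 = 120 and 120 + 120 + 16 = 256 bytes (two 128-byte blocks). A sketch of the computation, matching the two switch cases below:

        /* Sketch of the padlen rule above; index is bufcnt masked to the block size. */
        static size_t sha_padlen(size_t index, bool is_sha384_512)
        {
                if (is_sha384_512)
                        return (index < 112) ? (112 - index) : ((128 + 112) - index);
                return (index < 56) ? (56 - index) : ((64 + 56) - index);
        }
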
369 size[0] = ctx->digcnt[0]; in atmel_sha_fill_padding()
370 size[1] = ctx->digcnt[1]; in atmel_sha_fill_padding()
372 size[0] += ctx->bufcnt; in atmel_sha_fill_padding()
373 if (size[0] < ctx->bufcnt) in atmel_sha_fill_padding()
383 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_fill_padding()
386 index = ctx->bufcnt & 0x7f; in atmel_sha_fill_padding()
387 padlen = (index < 112) ? (112 - index) : ((128+112) - index); in atmel_sha_fill_padding()
388 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
389 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
390 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in atmel_sha_fill_padding()
391 ctx->bufcnt += padlen + 16; in atmel_sha_fill_padding()
392 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
396 index = ctx->bufcnt & 0x3f; in atmel_sha_fill_padding()
397 padlen = (index < 56) ? (56 - index) : ((64+56) - index); in atmel_sha_fill_padding()
398 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
399 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
400 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in atmel_sha_fill_padding()
401 ctx->bufcnt += padlen + 8; in atmel_sha_fill_padding()
402 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
413 if (!tctx->dd) { in atmel_sha_find_dev()
418 tctx->dd = dd; in atmel_sha_find_dev()
420 dd = tctx->dd; in atmel_sha_find_dev()
435 ctx->dd = dd; in atmel_sha_init()
437 ctx->flags = 0; in atmel_sha_init()
439 dev_dbg(dd->dev, "init: digest size: %d\n", in atmel_sha_init()
444 ctx->flags |= SHA_FLAGS_SHA1; in atmel_sha_init()
445 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_init()
448 ctx->flags |= SHA_FLAGS_SHA224; in atmel_sha_init()
449 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_init()
452 ctx->flags |= SHA_FLAGS_SHA256; in atmel_sha_init()
453 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_init()
456 ctx->flags |= SHA_FLAGS_SHA384; in atmel_sha_init()
457 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_init()
460 ctx->flags |= SHA_FLAGS_SHA512; in atmel_sha_init()
461 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_init()
464 return -EINVAL; in atmel_sha_init()
468 ctx->bufcnt = 0; in atmel_sha_init()
469 ctx->digcnt[0] = 0; in atmel_sha_init()
470 ctx->digcnt[1] = 0; in atmel_sha_init()
471 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_init()
478 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_write_ctrl()
483 if (!dd->caps.has_dma) in atmel_sha_write_ctrl()
486 if (dd->caps.has_dualbuff) in atmel_sha_write_ctrl()
492 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_write_ctrl()
523 if (!(ctx->digcnt[0] || ctx->digcnt[1])) { in atmel_sha_write_ctrl()
525 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) { in atmel_sha_write_ctrl()
526 const u32 *hash = (const u32 *)ctx->digest; in atmel_sha_write_ctrl()
534 ctx->flags &= ~SHA_FLAGS_RESTORE; in atmel_sha_write_ctrl()
552 atmel_sha_fn_t resume) in atmel_sha_wait_for_data_ready() argument
557 return resume(dd); in atmel_sha_wait_for_data_ready()
559 dd->resume = resume; in atmel_sha_wait_for_data_ready()
561 return -EINPROGRESS; in atmel_sha_wait_for_data_ready()
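
This helper is the heart of the driver's resume scheme: when DATRDY is already set the continuation runs immediately; otherwise it is parked in dd->resume and -EINPROGRESS is returned, and the interrupt handler later schedules the done tasklet, which calls dd->resume(dd) (see atmel_sha_done_task() further down). A hedged sketch of the pattern, with hypothetical step names:

        /* Illustrative two-step continuation, not actual driver code. */
        static int example_step_two(struct atmel_sha_dev *dd);

        static int example_step_one(struct atmel_sha_dev *dd)
        {
                /* ... write a block to the hardware ... */
                return atmel_sha_wait_for_data_ready(dd, example_step_two);
        }

        static int example_step_two(struct atmel_sha_dev *dd)
        {
                /* Runs synchronously, or later from the done tasklet. */
                return atmel_sha_complete(dd, 0);
        }
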
567 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_cpu()
571 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_cpu()
572 ctx->digcnt[1], ctx->digcnt[0], length, final); in atmel_sha_xmit_cpu()
576 /* digcnt must be non-zero before the next lines so the clocks can be disabled later */ in atmel_sha_xmit_cpu()
577 ctx->digcnt[0] += length; in atmel_sha_xmit_cpu()
578 if (ctx->digcnt[0] < length) in atmel_sha_xmit_cpu()
579 ctx->digcnt[1]++; in atmel_sha_xmit_cpu()
582 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_cpu()
586 dd->flags |= SHA_FLAGS_CPU; in atmel_sha_xmit_cpu()
591 return -EINPROGRESS; in atmel_sha_xmit_cpu()
597 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_pdc()
600 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_pdc()
601 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_pdc()
614 /* digcnt must be non-zero before the next lines so the clocks can be disabled later */ in atmel_sha_xmit_pdc()
615 ctx->digcnt[0] += length1; in atmel_sha_xmit_pdc()
616 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_pdc()
617 ctx->digcnt[1]++; in atmel_sha_xmit_pdc()
620 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_pdc()
622 dd->flags |= SHA_FLAGS_DMA_ACTIVE; in atmel_sha_xmit_pdc()
627 return -EINPROGRESS; in atmel_sha_xmit_pdc()
634 dd->is_async = true; in atmel_sha_dma_callback()
636 /* dma_lch_in transfer completed: now wait for DATRDY */ in atmel_sha_dma_callback()
643 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_dma()
647 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_dma()
648 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_dma()
650 dd->dma_lch_in.dma_conf.src_maxburst = 16; in atmel_sha_xmit_dma()
651 dd->dma_lch_in.dma_conf.dst_maxburst = 16; in atmel_sha_xmit_dma()
653 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); in atmel_sha_xmit_dma()
661 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, in atmel_sha_xmit_dma()
667 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, in atmel_sha_xmit_dma()
671 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_xmit_dma()
673 in_desc->callback = atmel_sha_dma_callback; in atmel_sha_xmit_dma()
674 in_desc->callback_param = dd; in atmel_sha_xmit_dma()
678 /* digcnt must be non-zero before the next lines so the clocks can be disabled later */ in atmel_sha_xmit_dma()
679 ctx->digcnt[0] += length1; in atmel_sha_xmit_dma()
680 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_dma()
681 ctx->digcnt[1]++; in atmel_sha_xmit_dma()
684 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_dma()
686 dd->flags |= SHA_FLAGS_DMA_ACTIVE; in atmel_sha_xmit_dma()
690 dma_async_issue_pending(dd->dma_lch_in.chan); in atmel_sha_xmit_dma()
692 return -EINPROGRESS; in atmel_sha_xmit_dma()
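
atmel_sha_xmit_dma() follows the standard dmaengine slave sequence: configure the channel, prepare a slave-sg descriptor, attach a completion callback, submit, then kick the engine. Condensed below with error handling elided; the submit step and the exact prep flags are not in the matched lines above, they are the usual API sequence shown here for orientation:

        /* Generic dmaengine slave-sg submit pattern, as used above. */
        dmaengine_slave_config(chan, &conf);
        desc = dmaengine_prep_slave_sg(chan, sg, nents, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                return -ENOMEM;                 /* prep failed */
        desc->callback = atmel_sha_dma_callback;
        desc->callback_param = dd;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
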
698 if (dd->caps.has_dma) in atmel_sha_xmit_start()
708 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_cpu()
713 bufcnt = ctx->bufcnt; in atmel_sha_update_cpu()
714 ctx->bufcnt = 0; in atmel_sha_update_cpu()
716 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); in atmel_sha_update_cpu()
723 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_xmit_dma_map()
724 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_xmit_dma_map()
725 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_xmit_dma_map()
726 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen + in atmel_sha_xmit_dma_map()
727 ctx->block_size); in atmel_sha_xmit_dma_map()
728 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_xmit_dma_map()
731 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_xmit_dma_map()
734 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); in atmel_sha_xmit_dma_map()
739 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_slow()
745 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_slow()
747 dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n", in atmel_sha_update_dma_slow()
748 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); in atmel_sha_update_dma_slow()
753 if (final || (ctx->bufcnt == ctx->buflen)) { in atmel_sha_update_dma_slow()
754 count = ctx->bufcnt; in atmel_sha_update_dma_slow()
755 ctx->bufcnt = 0; in atmel_sha_update_dma_slow()
764 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_start()
769 if (!ctx->total) in atmel_sha_update_dma_start()
772 if (ctx->bufcnt || ctx->offset) in atmel_sha_update_dma_start()
775 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n", in atmel_sha_update_dma_start()
776 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); in atmel_sha_update_dma_start()
778 sg = ctx->sg; in atmel_sha_update_dma_start()
780 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in atmel_sha_update_dma_start()
783 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) in atmel_sha_update_dma_start()
784 /* size is not ctx->block_size aligned */ in atmel_sha_update_dma_start()
787 length = min(ctx->total, sg->length); in atmel_sha_update_dma_start()
790 if (!(ctx->flags & SHA_FLAGS_FINUP)) { in atmel_sha_update_dma_start()
791 /* a non-final sg must be ctx->block_size aligned */ in atmel_sha_update_dma_start()
792 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
793 length -= tail; in atmel_sha_update_dma_start()
797 ctx->total -= length; in atmel_sha_update_dma_start()
798 ctx->offset = length; /* offset at which to start the slow path */ in atmel_sha_update_dma_start()
800 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_start()
804 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
805 length -= tail; in atmel_sha_update_dma_start()
806 ctx->total += tail; in atmel_sha_update_dma_start()
807 ctx->offset = length; /* offset at which to start the slow path */ in atmel_sha_update_dma_start()
809 sg = ctx->sg; in atmel_sha_update_dma_start()
814 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_update_dma_start()
815 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_start()
816 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_update_dma_start()
817 dev_err(dd->dev, "dma %zu bytes error\n", in atmel_sha_update_dma_start()
818 ctx->buflen + ctx->block_size); in atmel_sha_update_dma_start()
819 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
823 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_update_dma_start()
824 count = ctx->bufcnt; in atmel_sha_update_dma_start()
825 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
826 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, in atmel_sha_update_dma_start()
829 ctx->sg = sg; in atmel_sha_update_dma_start()
830 if (!dma_map_sg(dd->dev, ctx->sg, 1, in atmel_sha_update_dma_start()
832 dev_err(dd->dev, "dma_map_sg error\n"); in atmel_sha_update_dma_start()
833 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
836 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
838 count = ctx->bufcnt; in atmel_sha_update_dma_start()
839 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
840 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), in atmel_sha_update_dma_start()
841 length, ctx->dma_addr, count, final); in atmel_sha_update_dma_start()
845 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in atmel_sha_update_dma_start()
846 dev_err(dd->dev, "dma_map_sg error\n"); in atmel_sha_update_dma_start()
847 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
850 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
853 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, in atmel_sha_update_dma_start()
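
On this fast path, only whole hash blocks are handed to the DMA engine: any tail that is not a multiple of ctx->block_size is clipped off, accounted back into ctx->total, and picked up later by the slow buffered path starting at ctx->offset. For example, with a 64-byte block and length = 200, tail = 200 & 63 = 8, so 192 bytes go out via DMA and 8 bytes are deferred. The clipping in isolation:

        /* Sketch: trim a transfer down to a whole number of blocks. */
        static size_t clip_to_blocks(size_t length, size_t block_size)
        {
                return length - (length & (block_size - 1)); /* block_size is a power of two */
        }
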
859 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_stop()
861 if (ctx->flags & SHA_FLAGS_SG) { in atmel_sha_update_dma_stop()
862 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
863 if (ctx->sg->length == ctx->offset) { in atmel_sha_update_dma_stop()
864 ctx->sg = sg_next(ctx->sg); in atmel_sha_update_dma_stop()
865 if (ctx->sg) in atmel_sha_update_dma_stop()
866 ctx->offset = 0; in atmel_sha_update_dma_stop()
868 if (ctx->flags & SHA_FLAGS_PAD) { in atmel_sha_update_dma_stop()
869 dma_unmap_single(dd->dev, ctx->dma_addr, in atmel_sha_update_dma_stop()
870 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
873 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + in atmel_sha_update_dma_stop()
874 ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
882 struct ahash_request *req = dd->req; in atmel_sha_update_req()
886 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", in atmel_sha_update_req()
887 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
889 if (ctx->flags & SHA_FLAGS_CPU) in atmel_sha_update_req()
895 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n", in atmel_sha_update_req()
896 err, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
903 struct ahash_request *req = dd->req; in atmel_sha_final_req()
908 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { in atmel_sha_final_req()
910 count = ctx->bufcnt; in atmel_sha_final_req()
911 ctx->bufcnt = 0; in atmel_sha_final_req()
917 count = ctx->bufcnt; in atmel_sha_final_req()
918 ctx->bufcnt = 0; in atmel_sha_final_req()
919 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); in atmel_sha_final_req()
922 dev_dbg(dd->dev, "final_req: err: %d\n", err); in atmel_sha_final_req()
930 u32 *hash = (u32 *)ctx->digest; in atmel_sha_copy_hash()
933 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_copy_hash()
954 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
955 ctx->flags |= SHA_FLAGS_RESTORE; in atmel_sha_copy_hash()
962 if (!req->result) in atmel_sha_copy_ready_hash()
965 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_copy_ready_hash()
968 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
972 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
976 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
980 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
984 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
992 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish()
994 if (ctx->digcnt[0] || ctx->digcnt[1]) in atmel_sha_finish()
997 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1], in atmel_sha_finish()
998 ctx->digcnt[0], ctx->bufcnt); in atmel_sha_finish()
1006 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish_req()
1010 if (SHA_FLAGS_FINAL & dd->flags) in atmel_sha_finish_req()
1013 ctx->flags |= SHA_FLAGS_ERROR; in atmel_sha_finish_req()
1024 err = clk_enable(dd->iclk); in atmel_sha_hw_init()
1028 if (!(SHA_FLAGS_INIT & dd->flags)) { in atmel_sha_hw_init()
1030 dd->flags |= SHA_FLAGS_INIT; in atmel_sha_hw_init()
1031 dd->err = 0; in atmel_sha_hw_init()
1046 dd->hw_version = atmel_sha_get_version(dd); in atmel_sha_hw_version_init()
1048 dev_info(dd->dev, in atmel_sha_hw_version_init()
1049 "version: 0x%x\n", dd->hw_version); in atmel_sha_hw_version_init()
1051 clk_disable(dd->iclk); in atmel_sha_hw_version_init()
1063 spin_lock_irqsave(&dd->lock, flags); in atmel_sha_handle_queue()
1065 ret = ahash_enqueue_request(&dd->queue, req); in atmel_sha_handle_queue()
1067 if (SHA_FLAGS_BUSY & dd->flags) { in atmel_sha_handle_queue()
1068 spin_unlock_irqrestore(&dd->lock, flags); in atmel_sha_handle_queue()
1072 backlog = crypto_get_backlog(&dd->queue); in atmel_sha_handle_queue()
1073 async_req = crypto_dequeue_request(&dd->queue); in atmel_sha_handle_queue()
1075 dd->flags |= SHA_FLAGS_BUSY; in atmel_sha_handle_queue()
1077 spin_unlock_irqrestore(&dd->lock, flags); in atmel_sha_handle_queue()
1083 backlog->complete(backlog, -EINPROGRESS); in atmel_sha_handle_queue()
1085 ctx = crypto_tfm_ctx(async_req->tfm); in atmel_sha_handle_queue()
1087 dd->req = ahash_request_cast(async_req); in atmel_sha_handle_queue()
1088 start_async = (dd->req != req); in atmel_sha_handle_queue()
1089 dd->is_async = start_async; in atmel_sha_handle_queue()
1090 dd->force_complete = false; in atmel_sha_handle_queue()
1092 /* WARNING: ctx->start() MAY change dd->is_async. */ in atmel_sha_handle_queue()
1093 err = ctx->start(dd); in atmel_sha_handle_queue()
1101 struct ahash_request *req = dd->req; in atmel_sha_start()
1105 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", in atmel_sha_start()
1106 ctx->op, req->nbytes); in atmel_sha_start()
1114 * -EINPROGRESS: the hardware is busy and the SHA driver will resume in atmel_sha_start()
1131 dd->resume = atmel_sha_done; in atmel_sha_start()
1132 if (ctx->op == SHA_OP_UPDATE) { in atmel_sha_start()
1134 if (!err && (ctx->flags & SHA_FLAGS_FINUP)) in atmel_sha_start()
1137 } else if (ctx->op == SHA_OP_FINAL) { in atmel_sha_start()
1145 dev_dbg(dd->dev, "exit, err: %d\n", err); in atmel_sha_start()
1153 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in atmel_sha_enqueue()
1154 struct atmel_sha_dev *dd = tctx->dd; in atmel_sha_enqueue()
1156 ctx->op = op; in atmel_sha_enqueue()
1165 if (!req->nbytes) in atmel_sha_update()
1168 ctx->total = req->nbytes; in atmel_sha_update()
1169 ctx->sg = req->src; in atmel_sha_update()
1170 ctx->offset = 0; in atmel_sha_update()
1172 if (ctx->flags & SHA_FLAGS_FINUP) { in atmel_sha_update()
1173 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) in atmel_sha_update()
1175 ctx->flags |= SHA_FLAGS_CPU; in atmel_sha_update()
1176 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { in atmel_sha_update()
1187 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_final()
1189 if (ctx->flags & SHA_FLAGS_ERROR) in atmel_sha_final()
1192 if (ctx->flags & SHA_FLAGS_PAD) in atmel_sha_final()
1204 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_finup()
1207 if (err1 == -EINPROGRESS || in atmel_sha_finup()
1208 (err1 == -EBUSY && (ahash_request_flags(req) & in atmel_sha_finup()
1249 ctx->start = atmel_sha_start; in atmel_sha_cra_init()
1268 .cra_driver_name = "atmel-sha1",
1292 .cra_driver_name = "atmel-sha256",
1318 .cra_driver_name = "atmel-sha224",
1344 .cra_driver_name = "atmel-sha384",
1368 .cra_driver_name = "atmel-sha512",
1392 if (SHA_FLAGS_CPU & dd->flags) { in atmel_sha_done()
1393 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { in atmel_sha_done()
1394 dd->flags &= ~SHA_FLAGS_OUTPUT_READY; in atmel_sha_done()
1397 } else if (SHA_FLAGS_DMA_READY & dd->flags) { in atmel_sha_done()
1398 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { in atmel_sha_done()
1399 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; in atmel_sha_done()
1401 if (dd->err) { in atmel_sha_done()
1402 err = dd->err; in atmel_sha_done()
1406 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { in atmel_sha_done()
1407 /* hash or semi-hash ready */ in atmel_sha_done()
1408 dd->flags &= ~(SHA_FLAGS_DMA_READY | in atmel_sha_done()
1411 if (err != -EINPROGRESS) in atmel_sha_done()
1419 atmel_sha_finish_req(dd->req, err); in atmel_sha_done()
1428 dd->is_async = true; in atmel_sha_done_task()
1429 (void)dd->resume(dd); in atmel_sha_done_task()
1440 if (SHA_FLAGS_BUSY & sha_dd->flags) { in atmel_sha_irq()
1441 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; in atmel_sha_irq()
1442 if (!(SHA_FLAGS_CPU & sha_dd->flags)) in atmel_sha_irq()
1443 sha_dd->flags |= SHA_FLAGS_DMA_READY; in atmel_sha_irq()
1444 tasklet_schedule(&sha_dd->done_task); in atmel_sha_irq()
1446 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); in atmel_sha_irq()
1461 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_check_aligned()
1462 struct ahash_request *req = dd->req; in atmel_sha_dma_check_aligned()
1464 size_t bs = ctx->block_size; in atmel_sha_dma_check_aligned()
1468 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in atmel_sha_dma_check_aligned()
1475 if (len <= sg->length) { in atmel_sha_dma_check_aligned()
1476 dma->nents = nents + 1; in atmel_sha_dma_check_aligned()
1477 dma->last_sg_length = sg->length; in atmel_sha_dma_check_aligned()
1478 sg->length = ALIGN(len, sizeof(u32)); in atmel_sha_dma_check_aligned()
1483 if (!IS_ALIGNED(sg->length, bs)) in atmel_sha_dma_check_aligned()
1486 len -= sg->length; in atmel_sha_dma_check_aligned()
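
atmel_sha_dma_check_aligned() accepts a scatterlist for DMA only if every entry starts on a 32-bit boundary and every non-final entry covers a whole number of hash blocks; the final entry is rounded up to a multiple of u32, with its true length saved in last_sg_length so the callback below can restore it. A condensed restatement of the predicate, assuming the same semantics but without the nents/padding bookkeeping:

        /* Sketch of the alignment rules enforced above (illustrative). */
        static bool sg_ok_for_dma(struct scatterlist *sg, size_t len, size_t bs)
        {
                for (; sg; sg = sg_next(sg)) {
                        if (!IS_ALIGNED(sg->offset, sizeof(u32)))
                                return false;
                        if (len <= sg->length)
                                return true;    /* last needed entry */
                        if (!IS_ALIGNED(sg->length, bs))
                                return false;
                        len -= sg->length;
                }
                return false;   /* ran out of entries before len was covered */
        }
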
1495 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_callback2()
1499 dmaengine_terminate_all(dma->chan); in atmel_sha_dma_callback2()
1500 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_callback2()
1502 sg = dma->sg; in atmel_sha_dma_callback2()
1503 for (nents = 0; nents < dma->nents - 1; ++nents) in atmel_sha_dma_callback2()
1505 sg->length = dma->last_sg_length; in atmel_sha_dma_callback2()
1507 dd->is_async = true; in atmel_sha_dma_callback2()
1508 (void)atmel_sha_wait_for_data_ready(dd, dd->resume); in atmel_sha_dma_callback2()
1514 atmel_sha_fn_t resume) in atmel_sha_dma_start() argument
1516 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_start()
1517 struct dma_slave_config *config = &dma->dma_conf; in atmel_sha_dma_start()
1518 struct dma_chan *chan = dma->chan; in atmel_sha_dma_start()
1524 dd->resume = resume; in atmel_sha_dma_start()
1527 * dma->nents has already been initialized by atmel_sha_dma_check_aligned(). in atmel_sha_dma_start()
1530 dma->sg = src; in atmel_sha_dma_start()
1531 sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_start()
1533 err = -ENOMEM; in atmel_sha_dma_start()
1537 config->src_maxburst = 16; in atmel_sha_dma_start()
1538 config->dst_maxburst = 16; in atmel_sha_dma_start()
1543 desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV, in atmel_sha_dma_start()
1546 err = -ENOMEM; in atmel_sha_dma_start()
1550 desc->callback = atmel_sha_dma_callback2; in atmel_sha_dma_start()
1551 desc->callback_param = dd; in atmel_sha_dma_start()
1559 return -EINPROGRESS; in atmel_sha_dma_start()
1562 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_start()
1572 struct ahash_request *req = dd->req; in atmel_sha_cpu_transfer()
1574 const u32 *words = (const u32 *)ctx->buffer; in atmel_sha_cpu_transfer()
1578 din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1; in atmel_sha_cpu_transfer()
1581 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)); in atmel_sha_cpu_transfer()
1585 ctx->offset += ctx->bufcnt; in atmel_sha_cpu_transfer()
1586 ctx->total -= ctx->bufcnt; in atmel_sha_cpu_transfer()
1588 if (!ctx->total) in atmel_sha_cpu_transfer()
1593 * Fill ctx->buffer now with the next data to be written into the IDATAR registers. in atmel_sha_cpu_transfer()
1599 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); in atmel_sha_cpu_transfer()
1600 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, in atmel_sha_cpu_transfer()
1601 ctx->offset, ctx->bufcnt, 0); in atmel_sha_cpu_transfer()
1607 dd->resume = atmel_sha_cpu_transfer; in atmel_sha_cpu_transfer()
1609 return -EINPROGRESS; in atmel_sha_cpu_transfer()
1613 if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY))) in atmel_sha_cpu_transfer()
1614 return dd->cpu_transfer_complete(dd); in atmel_sha_cpu_transfer()
1616 return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete); in atmel_sha_cpu_transfer()
1624 atmel_sha_fn_t resume) in atmel_sha_cpu_start() argument
1626 struct ahash_request *req = dd->req; in atmel_sha_cpu_start()
1630 return resume(dd); in atmel_sha_cpu_start()
1632 ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY); in atmel_sha_cpu_start()
1635 ctx->flags |= SHA_FLAGS_IDATAR0; in atmel_sha_cpu_start()
1638 ctx->flags |= SHA_FLAGS_WAIT_DATARDY; in atmel_sha_cpu_start()
1640 ctx->sg = sg; in atmel_sha_cpu_start()
1641 ctx->total = len; in atmel_sha_cpu_start()
1642 ctx->offset = 0; in atmel_sha_cpu_start()
1645 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); in atmel_sha_cpu_start()
1646 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, in atmel_sha_cpu_start()
1647 ctx->offset, ctx->bufcnt, 0); in atmel_sha_cpu_start()
1649 dd->cpu_transfer_complete = resume; in atmel_sha_cpu_start()
1656 atmel_sha_fn_t resume) in atmel_sha_cpu_hash() argument
1658 struct ahash_request *req = dd->req; in atmel_sha_cpu_hash()
1663 if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding)) in atmel_sha_cpu_hash()
1664 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_cpu_hash()
1666 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); in atmel_sha_cpu_hash()
1672 sg_init_one(&dd->tmp, data, datalen); in atmel_sha_cpu_hash()
1673 return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume); in atmel_sha_cpu_hash()
1693 kfree(hkey->keydup); in atmel_sha_hmac_key_release()
1703 if (keylen > sizeof(hkey->buffer)) { in atmel_sha_hmac_key_set()
1704 hkey->keydup = kmemdup(key, keylen, GFP_KERNEL); in atmel_sha_hmac_key_set()
1705 if (!hkey->keydup) in atmel_sha_hmac_key_set()
1706 return -ENOMEM; in atmel_sha_hmac_key_set()
1709 memcpy(hkey->buffer, key, keylen); in atmel_sha_hmac_key_set()
1712 hkey->valid = true; in atmel_sha_hmac_key_set()
1713 hkey->keylen = keylen; in atmel_sha_hmac_key_set()
1721 if (!hkey->valid) in atmel_sha_hmac_key_get()
1724 *keylen = hkey->keylen; in atmel_sha_hmac_key_get()
1725 *key = (hkey->keydup) ? hkey->keydup : hkey->buffer; in atmel_sha_hmac_key_get()
1736 atmel_sha_fn_t resume; member
1740 atmel_sha_fn_t resume);
1754 atmel_sha_fn_t resume) in atmel_sha_hmac_setup() argument
1756 struct ahash_request *req = dd->req; in atmel_sha_hmac_setup()
1764 hmac->resume = resume; in atmel_sha_hmac_setup()
1765 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_hmac_setup()
1767 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_hmac_setup()
1768 ctx->hash_size = SHA1_DIGEST_SIZE; in atmel_sha_hmac_setup()
1772 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_hmac_setup()
1773 ctx->hash_size = SHA256_DIGEST_SIZE; in atmel_sha_hmac_setup()
1777 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_hmac_setup()
1778 ctx->hash_size = SHA256_DIGEST_SIZE; in atmel_sha_hmac_setup()
1782 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_hmac_setup()
1783 ctx->hash_size = SHA512_DIGEST_SIZE; in atmel_sha_hmac_setup()
1787 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_hmac_setup()
1788 ctx->hash_size = SHA512_DIGEST_SIZE; in atmel_sha_hmac_setup()
1792 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_hmac_setup()
1794 bs = ctx->block_size; in atmel_sha_hmac_setup()
1796 if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen))) in atmel_sha_hmac_setup()
1797 return resume(dd); in atmel_sha_hmac_setup()
1804 memcpy((u8 *)hmac->ipad, key, keylen); in atmel_sha_hmac_setup()
1805 memset((u8 *)hmac->ipad + keylen, 0, bs - keylen); in atmel_sha_hmac_setup()
1818 struct ahash_request *req = dd->req; in atmel_sha_hmac_prehash_key_done()
1823 size_t bs = ctx->block_size; in atmel_sha_hmac_prehash_key_done()
1828 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_prehash_key_done()
1829 memset((u8 *)hmac->ipad + ds, 0, bs - ds); in atmel_sha_hmac_prehash_key_done()
1835 struct ahash_request *req = dd->req; in atmel_sha_hmac_compute_ipad_hash()
1839 size_t bs = ctx->block_size; in atmel_sha_hmac_compute_ipad_hash()
1842 memcpy(hmac->opad, hmac->ipad, bs); in atmel_sha_hmac_compute_ipad_hash()
1844 hmac->ipad[i] ^= 0x36363636; in atmel_sha_hmac_compute_ipad_hash()
1845 hmac->opad[i] ^= 0x5c5c5c5c; in atmel_sha_hmac_compute_ipad_hash()
1848 return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false, in atmel_sha_hmac_compute_ipad_hash()
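
This is the RFC 2104 key schedule: the key (pre-hashed above when longer than one block) is zero-padded to the block size, then XORed with repeated 0x36 bytes to form ipad and with 0x5c bytes to form opad; hashing the two padded keys precomputes the inner and outer initial states. The word-wise XOR above is equivalent to this byte-wise form (illustrative helper, not driver code):

        /* Byte-wise equivalent of the 0x36363636/0x5c5c5c5c word XOR above. */
        static void hmac_pads(u8 *ipad, u8 *opad, size_t bs)
        {
                size_t i;

                memcpy(opad, ipad, bs);         /* both start from the zero-padded key */
                for (i = 0; i < bs; i++) {
                        ipad[i] ^= 0x36;
                        opad[i] ^= 0x5c;
                }
        }
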
1854 struct ahash_request *req = dd->req; in atmel_sha_hmac_compute_opad_hash()
1858 size_t bs = ctx->block_size; in atmel_sha_hmac_compute_opad_hash()
1859 size_t hs = ctx->hash_size; in atmel_sha_hmac_compute_opad_hash()
1863 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_compute_opad_hash()
1864 return atmel_sha_cpu_hash(dd, hmac->opad, bs, false, in atmel_sha_hmac_compute_opad_hash()
1870 struct ahash_request *req = dd->req; in atmel_sha_hmac_setup_done()
1874 size_t hs = ctx->hash_size; in atmel_sha_hmac_setup_done()
1878 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_setup_done()
1879 atmel_sha_hmac_key_release(&hmac->hkey); in atmel_sha_hmac_setup_done()
1880 return hmac->resume(dd); in atmel_sha_hmac_setup_done()
1885 struct ahash_request *req = dd->req; in atmel_sha_hmac_start()
1893 switch (ctx->op) { in atmel_sha_hmac_start()
1899 dd->resume = atmel_sha_done; in atmel_sha_hmac_start()
1904 dd->resume = atmel_sha_hmac_final; in atmel_sha_hmac_start()
1913 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_hmac_start()
1924 return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen); in atmel_sha_hmac_setkey()
1940 struct ahash_request *req = dd->req; in atmel_sha_hmac_init_done()
1944 size_t bs = ctx->block_size; in atmel_sha_hmac_init_done()
1945 size_t hs = ctx->hash_size; in atmel_sha_hmac_init_done()
1947 ctx->bufcnt = 0; in atmel_sha_hmac_init_done()
1948 ctx->digcnt[0] = bs; in atmel_sha_hmac_init_done()
1949 ctx->digcnt[1] = 0; in atmel_sha_hmac_init_done()
1950 ctx->flags |= SHA_FLAGS_RESTORE; in atmel_sha_hmac_init_done()
1951 memcpy(ctx->digest, hmac->ipad, hs); in atmel_sha_hmac_init_done()
1957 struct ahash_request *req = dd->req; in atmel_sha_hmac_final()
1961 u32 *digest = (u32 *)ctx->digest; in atmel_sha_hmac_final()
1963 size_t bs = ctx->block_size; in atmel_sha_hmac_final()
1964 size_t hs = ctx->hash_size; in atmel_sha_hmac_final()
1977 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_hmac_final()
1980 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); in atmel_sha_hmac_final()
1986 sg_init_one(&dd->tmp, digest, ds); in atmel_sha_hmac_final()
1987 return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true, in atmel_sha_hmac_final()
1994 * req->result might not be sizeof(u32) aligned, so copy the in atmel_sha_hmac_final_done()
1995 * digest into ctx->digest[] before memcpy()ing the data into in atmel_sha_hmac_final_done()
1996 * req->result. in atmel_sha_hmac_final_done()
1998 atmel_sha_copy_hash(dd->req); in atmel_sha_hmac_final_done()
1999 atmel_sha_copy_ready_hash(dd->req); in atmel_sha_hmac_final_done()
2016 struct ahash_request *req = dd->req; in atmel_sha_hmac_digest2()
2020 size_t hs = ctx->hash_size; in atmel_sha_hmac_digest2()
2026 if (!req->nbytes) in atmel_sha_hmac_digest2()
2027 return atmel_sha_complete(dd, -EINVAL); // TODO: in atmel_sha_hmac_digest2()
2030 if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD && in atmel_sha_hmac_digest2()
2031 atmel_sha_dma_check_aligned(dd, req->src, req->nbytes)) in atmel_sha_hmac_digest2()
2037 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); in atmel_sha_hmac_digest2()
2041 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_hmac_digest2()
2045 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; in atmel_sha_hmac_digest2()
2052 atmel_sha_write(dd, SHA_MSR, req->nbytes); in atmel_sha_hmac_digest2()
2053 atmel_sha_write(dd, SHA_BCR, req->nbytes); in atmel_sha_hmac_digest2()
2059 return atmel_sha_dma_start(dd, req->src, req->nbytes, in atmel_sha_hmac_digest2()
2062 return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true, in atmel_sha_hmac_digest2()
2072 hmac->base.start = atmel_sha_hmac_start; in atmel_sha_hmac_cra_init()
2073 atmel_sha_hmac_key_init(&hmac->hkey); in atmel_sha_hmac_cra_init()
2082 atmel_sha_hmac_key_release(&hmac->hkey); in atmel_sha_hmac_cra_exit()
2099 .cra_driver_name = "atmel-hmac-sha1",
2124 .cra_driver_name = "atmel-hmac-sha224",
2149 .cra_driver_name = "atmel-hmac-sha256",
2174 .cra_driver_name = "atmel-hmac-sha384",
2199 .cra_driver_name = "atmel-hmac-sha512",
2244 struct ahash_request *req = areq->data; in atmel_sha_authenc_complete()
2247 authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); in atmel_sha_authenc_complete()
2252 struct ahash_request *req = dd->req; in atmel_sha_authenc_start()
2257 * Force atmel_sha_complete() to call req->base.complete(), ie in atmel_sha_authenc_start()
2258 * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). in atmel_sha_authenc_start()
2260 dd->force_complete = true; in atmel_sha_authenc_start()
2263 return authctx->cb(authctx->aes_dev, err, dd->is_async); in atmel_sha_authenc_start()
2287 int err = -EINVAL; in atmel_sha_authenc_spawn()
2291 name = "atmel-hmac-sha1"; in atmel_sha_authenc_spawn()
2295 name = "atmel-hmac-sha224"; in atmel_sha_authenc_spawn()
2299 name = "atmel-hmac-sha256"; in atmel_sha_authenc_spawn()
2303 name = "atmel-hmac-sha384"; in atmel_sha_authenc_spawn()
2307 name = "atmel-hmac-sha512"; in atmel_sha_authenc_spawn()
2320 tctx->start = atmel_sha_authenc_start; in atmel_sha_authenc_spawn()
2321 tctx->flags = mode; in atmel_sha_authenc_spawn()
2325 err = -ENOMEM; in atmel_sha_authenc_spawn()
2328 auth->tfm = tfm; in atmel_sha_authenc_spawn()
2342 crypto_free_ahash(auth->tfm); in atmel_sha_authenc_free()
2351 struct crypto_ahash *tfm = auth->tfm; in atmel_sha_authenc_setkey()
2369 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_schedule()
2370 struct crypto_ahash *tfm = auth->tfm; in atmel_sha_authenc_schedule()
2380 return cb(aes_dev, -ENODEV, false); in atmel_sha_authenc_schedule()
2383 ctx->dd = dd; in atmel_sha_authenc_schedule()
2384 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_authenc_schedule()
2385 authctx->cb = cb; in atmel_sha_authenc_schedule()
2386 authctx->aes_dev = aes_dev; in atmel_sha_authenc_schedule()
2401 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_init()
2404 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_init()
2407 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_authenc_init()
2409 authctx->cb = cb; in atmel_sha_authenc_init()
2410 authctx->aes_dev = aes_dev; in atmel_sha_authenc_init()
2411 authctx->assoc = assoc; in atmel_sha_authenc_init()
2412 authctx->assoclen = assoclen; in atmel_sha_authenc_init()
2413 authctx->textlen = textlen; in atmel_sha_authenc_init()
2415 ctx->flags = hmac->base.flags; in atmel_sha_authenc_init()
2422 struct ahash_request *req = dd->req; in atmel_sha_authenc_init2()
2424 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_init2()
2427 size_t hs = ctx->hash_size; in atmel_sha_authenc_init2()
2433 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); in atmel_sha_authenc_init2()
2437 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_authenc_init2()
2442 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; in atmel_sha_authenc_init2()
2445 msg_size = authctx->assoclen + authctx->textlen; in atmel_sha_authenc_init2()
2452 return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, in atmel_sha_authenc_init2()
2459 struct ahash_request *req = dd->req; in atmel_sha_authenc_init_done()
2462 return authctx->cb(authctx->aes_dev, 0, dd->is_async); in atmel_sha_authenc_init_done()
2471 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_final()
2472 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_final()
2474 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_authenc_final()
2476 authctx->digestlen = SHA1_DIGEST_SIZE; in atmel_sha_authenc_final()
2480 authctx->digestlen = SHA224_DIGEST_SIZE; in atmel_sha_authenc_final()
2484 authctx->digestlen = SHA256_DIGEST_SIZE; in atmel_sha_authenc_final()
2488 authctx->digestlen = SHA384_DIGEST_SIZE; in atmel_sha_authenc_final()
2492 authctx->digestlen = SHA512_DIGEST_SIZE; in atmel_sha_authenc_final()
2496 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_authenc_final()
2498 if (authctx->digestlen > digestlen) in atmel_sha_authenc_final()
2499 authctx->digestlen = digestlen; in atmel_sha_authenc_final()
2501 authctx->cb = cb; in atmel_sha_authenc_final()
2502 authctx->aes_dev = aes_dev; in atmel_sha_authenc_final()
2503 authctx->digest = digest; in atmel_sha_authenc_final()
2511 struct ahash_request *req = dd->req; in atmel_sha_authenc_final_done()
2513 size_t i, num_words = authctx->digestlen / sizeof(u32); in atmel_sha_authenc_final_done()
2516 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_authenc_final_done()
2524 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_abort()
2525 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_abort()
2527 /* Prevent atmel_sha_complete() from calling req->base.complete(). */ in atmel_sha_authenc_abort()
2528 dd->is_async = false; in atmel_sha_authenc_abort()
2529 dd->force_complete = false; in atmel_sha_authenc_abort()
2541 if (dd->caps.has_hmac) in atmel_sha_unregister_algs()
2548 if (dd->caps.has_sha224) in atmel_sha_unregister_algs()
2551 if (dd->caps.has_sha_384_512) { in atmel_sha_unregister_algs()
2567 if (dd->caps.has_sha224) { in atmel_sha_register_algs()
2573 if (dd->caps.has_sha_384_512) { in atmel_sha_register_algs()
2581 if (dd->caps.has_hmac) { in atmel_sha_register_algs()
2613 if (sl && sl->dma_dev == chan->device->dev) { in atmel_sha_filter()
2614 chan->private = sl; in atmel_sha_filter()
2630 dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in, in atmel_sha_dma_init()
2631 atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); in atmel_sha_dma_init()
2632 if (!dd->dma_lch_in.chan) { in atmel_sha_dma_init()
2633 dev_warn(dd->dev, "no DMA channel available\n"); in atmel_sha_dma_init()
2634 return -ENODEV; in atmel_sha_dma_init()
2637 dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; in atmel_sha_dma_init()
2638 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + in atmel_sha_dma_init()
2640 dd->dma_lch_in.dma_conf.src_maxburst = 1; in atmel_sha_dma_init()
2641 dd->dma_lch_in.dma_conf.src_addr_width = in atmel_sha_dma_init()
2643 dd->dma_lch_in.dma_conf.dst_maxburst = 1; in atmel_sha_dma_init()
2644 dd->dma_lch_in.dma_conf.dst_addr_width = in atmel_sha_dma_init()
2646 dd->dma_lch_in.dma_conf.device_fc = false; in atmel_sha_dma_init()
2653 dma_release_channel(dd->dma_lch_in.chan); in atmel_sha_dma_cleanup()
2659 dd->caps.has_dma = 0; in atmel_sha_get_cap()
2660 dd->caps.has_dualbuff = 0; in atmel_sha_get_cap()
2661 dd->caps.has_sha224 = 0; in atmel_sha_get_cap()
2662 dd->caps.has_sha_384_512 = 0; in atmel_sha_get_cap()
2663 dd->caps.has_uihv = 0; in atmel_sha_get_cap()
2664 dd->caps.has_hmac = 0; in atmel_sha_get_cap()
2667 switch (dd->hw_version & 0xff0) { in atmel_sha_get_cap()
2669 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2670 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2671 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2672 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2673 dd->caps.has_uihv = 1; in atmel_sha_get_cap()
2674 dd->caps.has_hmac = 1; in atmel_sha_get_cap()
2677 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2678 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2679 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2680 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2681 dd->caps.has_uihv = 1; in atmel_sha_get_cap()
2684 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2685 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2686 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2687 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2690 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2691 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2692 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2697 dev_warn(dd->dev, in atmel_sha_get_cap()
2705 { .compatible = "atmel,at91sam9g46-sha" },
2713 struct device_node *np = pdev->dev.of_node; in atmel_sha_of_init()
2717 dev_err(&pdev->dev, "device node not found\n"); in atmel_sha_of_init()
2718 return ERR_PTR(-EINVAL); in atmel_sha_of_init()
2721 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); in atmel_sha_of_init()
2723 return ERR_PTR(-ENOMEM); in atmel_sha_of_init()
2725 pdata->dma_slave = devm_kzalloc(&pdev->dev, in atmel_sha_of_init()
2726 sizeof(*(pdata->dma_slave)), in atmel_sha_of_init()
2728 if (!pdata->dma_slave) in atmel_sha_of_init()
2729 return ERR_PTR(-ENOMEM); in atmel_sha_of_init()
2736 return ERR_PTR(-EINVAL); in atmel_sha_of_init()
2744 struct device *dev = &pdev->dev; in atmel_sha_probe()
2748 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); in atmel_sha_probe()
2750 err = -ENOMEM; in atmel_sha_probe()
2754 sha_dd->dev = dev; in atmel_sha_probe()
2758 INIT_LIST_HEAD(&sha_dd->list); in atmel_sha_probe()
2759 spin_lock_init(&sha_dd->lock); in atmel_sha_probe()
2761 tasklet_init(&sha_dd->done_task, atmel_sha_done_task, in atmel_sha_probe()
2763 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, in atmel_sha_probe()
2766 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); in atmel_sha_probe()
2772 err = -ENODEV; in atmel_sha_probe()
2775 sha_dd->phys_base = sha_res->start; in atmel_sha_probe()
2778 sha_dd->irq = platform_get_irq(pdev, 0); in atmel_sha_probe()
2779 if (sha_dd->irq < 0) { in atmel_sha_probe()
2781 err = sha_dd->irq; in atmel_sha_probe()
2785 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, in atmel_sha_probe()
2786 IRQF_SHARED, "atmel-sha", sha_dd); in atmel_sha_probe()
2793 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); in atmel_sha_probe()
2794 if (IS_ERR(sha_dd->iclk)) { in atmel_sha_probe()
2796 err = PTR_ERR(sha_dd->iclk); in atmel_sha_probe()
2800 sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); in atmel_sha_probe()
2801 if (IS_ERR(sha_dd->io_base)) { in atmel_sha_probe()
2803 err = PTR_ERR(sha_dd->io_base); in atmel_sha_probe()
2807 err = clk_prepare(sha_dd->iclk); in atmel_sha_probe()
2815 if (sha_dd->caps.has_dma) { in atmel_sha_probe()
2816 pdata = pdev->dev.platform_data; in atmel_sha_probe()
2820 dev_err(&pdev->dev, "platform data not available\n"); in atmel_sha_probe()
2825 if (!pdata->dma_slave) { in atmel_sha_probe()
2826 err = -ENXIO; in atmel_sha_probe()
2834 dma_chan_name(sha_dd->dma_lch_in.chan)); in atmel_sha_probe()
2838 list_add_tail(&sha_dd->list, &atmel_sha.dev_list); in atmel_sha_probe()
2846 sha_dd->caps.has_sha224 ? "/SHA224" : "", in atmel_sha_probe()
2847 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : ""); in atmel_sha_probe()
2853 list_del(&sha_dd->list); in atmel_sha_probe()
2855 if (sha_dd->caps.has_dma) in atmel_sha_probe()
2859 clk_unprepare(sha_dd->iclk); in atmel_sha_probe()
2861 tasklet_kill(&sha_dd->queue_task); in atmel_sha_probe()
2862 tasklet_kill(&sha_dd->done_task); in atmel_sha_probe()
2875 return -ENODEV; in atmel_sha_remove()
2877 list_del(&sha_dd->list); in atmel_sha_remove()
2882 tasklet_kill(&sha_dd->queue_task); in atmel_sha_remove()
2883 tasklet_kill(&sha_dd->done_task); in atmel_sha_remove()
2885 if (sha_dd->caps.has_dma) in atmel_sha_remove()
2888 clk_unprepare(sha_dd->iclk); in atmel_sha_remove()
2906 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");