drivers/crypto/atmel-sha.c: lines matching "resume" and "offset"
1 // SPDX-License-Identifier: GPL-2.0
7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
10 * Some ideas are from omap-sham.c drivers.
30 #include <linux/dma-mapping.h>
40 #include "atmel-sha-regs.h"
41 #include "atmel-authenc.h"
102 unsigned int offset; /* offset in current sg */ member
147 atmel_sha_fn_t resume; member
170 static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr) in atmel_sha_reg_name() argument
172 switch (offset) { in atmel_sha_reg_name()
213 snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2); in atmel_sha_reg_name()
234 16u + ((offset - SHA_REG_DIGEST(0)) >> 2)); in atmel_sha_reg_name()
237 (offset - SHA_REG_DIGEST(0)) >> 2); in atmel_sha_reg_name()
244 snprintf(tmp, sz, "0x%02x", offset); in atmel_sha_reg_name()
253 static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset) in atmel_sha_read() argument
255 u32 value = readl_relaxed(dd->io_base + offset); in atmel_sha_read()
258 if (dd->flags & SHA_FLAGS_DUMP_REG) { in atmel_sha_read()
261 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, in atmel_sha_read()
262 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false)); in atmel_sha_read()
270 u32 offset, u32 value) in atmel_sha_write() argument
273 if (dd->flags & SHA_FLAGS_DUMP_REG) { in atmel_sha_write()
276 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, in atmel_sha_write()
277 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true)); in atmel_sha_write()
281 writel_relaxed(value, dd->io_base + offset); in atmel_sha_write()
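/*
 * Illustrative sketch (not driver code): the two helpers above wrap
 * readl_relaxed()/writel_relaxed() on dd->io_base + offset and, when
 * SHA_FLAGS_DUMP_REG is set, trace each access under the symbolic name
 * returned by atmel_sha_reg_name(). A minimal user-space analogue of the
 * same trace-on-access pattern; fake_io_base and trace_regs are made-up
 * stand-ins for the MMIO window and the flag:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_io_base[64];	/* stand-in for the MMIO window */
static int trace_regs = 1;		/* stand-in for SHA_FLAGS_DUMP_REG */

static uint32_t sha_read(uint32_t offset)
{
	uint32_t value = fake_io_base[offset >> 2];

	if (trace_regs)
		printf("read 0x%08x from 0x%02x\n",
		       (unsigned int)value, (unsigned int)offset);
	return value;
}

static void sha_write(uint32_t offset, uint32_t value)
{
	if (trace_regs)
		printf("write 0x%08x into 0x%02x\n",
		       (unsigned int)value, (unsigned int)offset);
	fake_io_base[offset >> 2] = value;
}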
286 struct ahash_request *req = dd->req; in atmel_sha_complete()
288 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | in atmel_sha_complete()
292 clk_disable(dd->iclk); in atmel_sha_complete()
294 if ((dd->is_async || dd->force_complete) && req->base.complete) in atmel_sha_complete()
298 tasklet_schedule(&dd->queue_task); in atmel_sha_complete()
307 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { in atmel_sha_append_sg()
308 count = min(ctx->sg->length - ctx->offset, ctx->total); in atmel_sha_append_sg()
309 count = min(count, ctx->buflen - ctx->bufcnt); in atmel_sha_append_sg()
318 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { in atmel_sha_append_sg()
319 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
326 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, in atmel_sha_append_sg()
327 ctx->offset, count, 0); in atmel_sha_append_sg()
329 ctx->bufcnt += count; in atmel_sha_append_sg()
330 ctx->offset += count; in atmel_sha_append_sg()
331 ctx->total -= count; in atmel_sha_append_sg()
333 if (ctx->offset == ctx->sg->length) { in atmel_sha_append_sg()
334 ctx->sg = sg_next(ctx->sg); in atmel_sha_append_sg()
335 if (ctx->sg) in atmel_sha_append_sg()
336 ctx->offset = 0; in atmel_sha_append_sg()
338 ctx->total = 0; in atmel_sha_append_sg()
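/*
 * Illustrative sketch (not driver code): atmel_sha_append_sg() drains the
 * request scatterlist into ctx->buffer, advancing ctx->offset within the
 * current segment, moving on when a segment is exhausted, and stopping
 * once the buffer is full or ctx->total hits zero. The same bookkeeping
 * over a plain array of segments (struct seg and append_segs() are
 * hypothetical names):
 */
#include <stddef.h>
#include <string.h>

struct seg { const unsigned char *data; size_t len; };

static void append_segs(unsigned char *buf, size_t buflen, size_t *bufcnt,
			const struct seg *sg, size_t nsegs,
			size_t *seg_idx, size_t *offset, size_t *total)
{
	while (*bufcnt < buflen && *total && *seg_idx < nsegs) {
		const struct seg *s = &sg[*seg_idx];
		size_t count = s->len - *offset;

		if (count > *total)
			count = *total;		/* don't copy past the request */
		if (count > buflen - *bufcnt)
			count = buflen - *bufcnt;	/* don't overflow */

		memcpy(buf + *bufcnt, s->data + *offset, count);
		*bufcnt += count;
		*offset += count;
		*total -= count;

		if (*offset == s->len) {	/* segment exhausted: next one */
			(*seg_idx)++;
			*offset = 0;
		}
	}
}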
349 * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or
354 * - if message length < 56 bytes then padlen = 56 - message length
355 * - else padlen = 64 + 56 - message length
358 * - if message length < 112 bytes then padlen = 112 - message length
359 * - else padlen = 128 + 112 - message length
367 size[0] = ctx->digcnt[0]; in atmel_sha_fill_padding()
368 size[1] = ctx->digcnt[1]; in atmel_sha_fill_padding()
370 size[0] += ctx->bufcnt; in atmel_sha_fill_padding()
371 if (size[0] < ctx->bufcnt) in atmel_sha_fill_padding()
381 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_fill_padding()
384 index = ctx->bufcnt & 0x7f; in atmel_sha_fill_padding()
385 padlen = (index < 112) ? (112 - index) : ((128+112) - index); in atmel_sha_fill_padding()
386 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
387 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
388 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); in atmel_sha_fill_padding()
389 ctx->bufcnt += padlen + 16; in atmel_sha_fill_padding()
390 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
394 index = ctx->bufcnt & 0x3f; in atmel_sha_fill_padding()
395 padlen = (index < 56) ? (56 - index) : ((64+56) - index); in atmel_sha_fill_padding()
396 *(ctx->buffer + ctx->bufcnt) = 0x80; in atmel_sha_fill_padding()
397 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); in atmel_sha_fill_padding()
398 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); in atmel_sha_fill_padding()
399 ctx->bufcnt += padlen + 8; in atmel_sha_fill_padding()
400 ctx->flags |= SHA_FLAGS_PAD; in atmel_sha_fill_padding()
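/*
 * Illustrative check (not driver code): the padlen rules quoted in the
 * comment above reduce to "pad until 56 mod 64" (SHA1/SHA224/SHA256) or
 * "112 mod 128" (SHA384/SHA512), leaving room for the length words. A
 * standalone program verifying both formulas used by the switch above:
 */
#include <assert.h>
#include <stddef.h>

static size_t sha_padlen64(size_t len)		/* 64-byte block variants */
{
	size_t index = len & 0x3f;

	return (index < 56) ? (56 - index) : ((64 + 56) - index);
}

static size_t sha_padlen128(size_t len)		/* 128-byte block variants */
{
	size_t index = len & 0x7f;

	return (index < 112) ? (112 - index) : ((128 + 112) - index);
}

int main(void)
{
	for (size_t len = 0; len < 1024; len++) {
		/* always at least 1 byte for the 0x80 marker, and the
		 * padded length always leaves room for the bit count */
		assert(sha_padlen64(len) >= 1);
		assert((len + sha_padlen64(len)) % 64 == 56);
		assert(sha_padlen128(len) >= 1);
		assert((len + sha_padlen128(len)) % 128 == 112);
	}
	return 0;
}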
411 if (!tctx->dd) { in atmel_sha_find_dev()
416 tctx->dd = dd; in atmel_sha_find_dev()
418 dd = tctx->dd; in atmel_sha_find_dev()
433 ctx->dd = dd; in atmel_sha_init()
435 ctx->flags = 0; in atmel_sha_init()
437 dev_dbg(dd->dev, "init: digest size: %u\n", in atmel_sha_init()
442 ctx->flags |= SHA_FLAGS_SHA1; in atmel_sha_init()
443 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_init()
446 ctx->flags |= SHA_FLAGS_SHA224; in atmel_sha_init()
447 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_init()
450 ctx->flags |= SHA_FLAGS_SHA256; in atmel_sha_init()
451 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_init()
454 ctx->flags |= SHA_FLAGS_SHA384; in atmel_sha_init()
455 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_init()
458 ctx->flags |= SHA_FLAGS_SHA512; in atmel_sha_init()
459 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_init()
462 return -EINVAL; in atmel_sha_init()
465 ctx->bufcnt = 0; in atmel_sha_init()
466 ctx->digcnt[0] = 0; in atmel_sha_init()
467 ctx->digcnt[1] = 0; in atmel_sha_init()
468 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_init()
475 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_write_ctrl()
480 if (!dd->caps.has_dma) in atmel_sha_write_ctrl()
483 if (dd->caps.has_dualbuff) in atmel_sha_write_ctrl()
489 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_write_ctrl()
520 if (!(ctx->digcnt[0] || ctx->digcnt[1])) { in atmel_sha_write_ctrl()
522 } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) { in atmel_sha_write_ctrl()
523 const u32 *hash = (const u32 *)ctx->digest; in atmel_sha_write_ctrl()
531 ctx->flags &= ~SHA_FLAGS_RESTORE; in atmel_sha_write_ctrl()
549 atmel_sha_fn_t resume) in atmel_sha_wait_for_data_ready() argument
554 return resume(dd); in atmel_sha_wait_for_data_ready()
556 dd->resume = resume; in atmel_sha_wait_for_data_ready()
558 return -EINPROGRESS; in atmel_sha_wait_for_data_ready()
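/*
 * Illustrative sketch (not driver code): atmel_sha_wait_for_data_ready()
 * runs the continuation at once when DATRDY is already set, otherwise it
 * parks it in dd->resume, unmasks the interrupt and returns -EINPROGRESS
 * so the IRQ path can finish the job later. The bare pattern, with
 * hypothetical names (struct dev_state, wait_for_data_ready):
 */
#include <errno.h>

struct dev_state;
typedef int (*resume_fn)(struct dev_state *dd);

struct dev_state {
	int data_ready;		/* stand-in for the DATRDY status bit */
	resume_fn resume;	/* continuation invoked from the IRQ path */
};

static int wait_for_data_ready(struct dev_state *dd, resume_fn resume)
{
	if (dd->data_ready)
		return resume(dd);	/* ready: continue synchronously */

	dd->resume = resume;		/* IRQ handler calls dd->resume(dd) */
	return -EINPROGRESS;
}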
564 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_cpu()
568 dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_cpu()
569 ctx->digcnt[1], ctx->digcnt[0], length, final); in atmel_sha_xmit_cpu()
573 /* digcnt must be non-zero before the next lines so clocks can be disabled later */ in atmel_sha_xmit_cpu()
574 ctx->digcnt[0] += length; in atmel_sha_xmit_cpu()
575 if (ctx->digcnt[0] < length) in atmel_sha_xmit_cpu()
576 ctx->digcnt[1]++; in atmel_sha_xmit_cpu()
579 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_cpu()
583 dd->flags |= SHA_FLAGS_CPU; in atmel_sha_xmit_cpu()
588 return -EINPROGRESS; in atmel_sha_xmit_cpu()
594 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_pdc()
597 dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_pdc()
598 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_pdc()
611 /* digcnt must be non-zero before the next lines so clocks can be disabled later */ in atmel_sha_xmit_pdc()
612 ctx->digcnt[0] += length1; in atmel_sha_xmit_pdc()
613 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_pdc()
614 ctx->digcnt[1]++; in atmel_sha_xmit_pdc()
617 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_pdc()
619 dd->flags |= SHA_FLAGS_DMA_ACTIVE; in atmel_sha_xmit_pdc()
624 return -EINPROGRESS; in atmel_sha_xmit_pdc()
631 dd->is_async = true; in atmel_sha_dma_callback()
633 /* dma_lch_in transfer completed - wait for DATRDY */ in atmel_sha_dma_callback()
640 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_xmit_dma()
644 dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n", in atmel_sha_xmit_dma()
645 ctx->digcnt[1], ctx->digcnt[0], length1, final); in atmel_sha_xmit_dma()
647 dd->dma_lch_in.dma_conf.src_maxburst = 16; in atmel_sha_xmit_dma()
648 dd->dma_lch_in.dma_conf.dst_maxburst = 16; in atmel_sha_xmit_dma()
650 dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf); in atmel_sha_xmit_dma()
658 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2, in atmel_sha_xmit_dma()
664 in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1, in atmel_sha_xmit_dma()
668 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_xmit_dma()
670 in_desc->callback = atmel_sha_dma_callback; in atmel_sha_xmit_dma()
671 in_desc->callback_param = dd; in atmel_sha_xmit_dma()
675 /* digcnt must be non-zero before the next lines so clocks can be disabled later */ in atmel_sha_xmit_dma()
676 ctx->digcnt[0] += length1; in atmel_sha_xmit_dma()
677 if (ctx->digcnt[0] < length1) in atmel_sha_xmit_dma()
678 ctx->digcnt[1]++; in atmel_sha_xmit_dma()
681 dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */ in atmel_sha_xmit_dma()
683 dd->flags |= SHA_FLAGS_DMA_ACTIVE; in atmel_sha_xmit_dma()
687 dma_async_issue_pending(dd->dma_lch_in.chan); in atmel_sha_xmit_dma()
689 return -EINPROGRESS; in atmel_sha_xmit_dma()
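/*
 * Illustrative check (not driver code): the idiom repeated in the three
 * xmit paths above ("digcnt[0] += length; if (digcnt[0] < length)
 * digcnt[1]++;") keeps a 128-bit byte counter in two u64 words, detecting
 * carry through unsigned wrap-around:
 */
#include <assert.h>
#include <stdint.h>

static void digcnt_add(uint64_t digcnt[2], uint64_t length)
{
	digcnt[0] += length;
	if (digcnt[0] < length)	/* wrapped: carry into the high word */
		digcnt[1]++;
}

int main(void)
{
	uint64_t c[2] = { UINT64_MAX - 1, 0 };

	digcnt_add(c, 2);	/* low word wraps to 0 exactly */
	assert(c[0] == 0 && c[1] == 1);
	return 0;
}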
695 if (dd->caps.has_dma) in atmel_sha_xmit_start()
705 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_cpu()
710 bufcnt = ctx->bufcnt; in atmel_sha_update_cpu()
711 ctx->bufcnt = 0; in atmel_sha_update_cpu()
713 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); in atmel_sha_update_cpu()
720 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_xmit_dma_map()
721 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_xmit_dma_map()
722 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_xmit_dma_map()
723 dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen + in atmel_sha_xmit_dma_map()
724 ctx->block_size); in atmel_sha_xmit_dma_map()
725 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_xmit_dma_map()
728 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_xmit_dma_map()
731 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); in atmel_sha_xmit_dma_map()
736 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_slow()
742 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_slow()
744 dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n", in atmel_sha_update_dma_slow()
745 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); in atmel_sha_update_dma_slow()
750 if (final || (ctx->bufcnt == ctx->buflen)) { in atmel_sha_update_dma_slow()
751 count = ctx->bufcnt; in atmel_sha_update_dma_slow()
752 ctx->bufcnt = 0; in atmel_sha_update_dma_slow()
761 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_start()
766 if (!ctx->total) in atmel_sha_update_dma_start()
769 if (ctx->bufcnt || ctx->offset) in atmel_sha_update_dma_start()
772 dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n", in atmel_sha_update_dma_start()
773 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); in atmel_sha_update_dma_start()
775 sg = ctx->sg; in atmel_sha_update_dma_start()
777 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in atmel_sha_update_dma_start()
780 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) in atmel_sha_update_dma_start()
781 /* size is not ctx->block_size aligned */ in atmel_sha_update_dma_start()
784 length = min(ctx->total, sg->length); in atmel_sha_update_dma_start()
787 if (!(ctx->flags & SHA_FLAGS_FINUP)) { in atmel_sha_update_dma_start()
788 /* not last sg must be ctx->block_size aligned */ in atmel_sha_update_dma_start()
789 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
790 length -= tail; in atmel_sha_update_dma_start()
794 ctx->total -= length; in atmel_sha_update_dma_start()
795 ctx->offset = length; /* offset where to start slow */ in atmel_sha_update_dma_start()
797 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; in atmel_sha_update_dma_start()
801 tail = length & (ctx->block_size - 1); in atmel_sha_update_dma_start()
802 length -= tail; in atmel_sha_update_dma_start()
803 ctx->total += tail; in atmel_sha_update_dma_start()
804 ctx->offset = length; /* offset where to start slow */ in atmel_sha_update_dma_start()
806 sg = ctx->sg; in atmel_sha_update_dma_start()
811 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, in atmel_sha_update_dma_start()
812 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_start()
813 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { in atmel_sha_update_dma_start()
814 dev_err(dd->dev, "dma %zu bytes error\n", in atmel_sha_update_dma_start()
815 ctx->buflen + ctx->block_size); in atmel_sha_update_dma_start()
816 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
820 ctx->flags &= ~SHA_FLAGS_SG; in atmel_sha_update_dma_start()
821 count = ctx->bufcnt; in atmel_sha_update_dma_start()
822 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
823 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, in atmel_sha_update_dma_start()
826 ctx->sg = sg; in atmel_sha_update_dma_start()
827 if (!dma_map_sg(dd->dev, ctx->sg, 1, in atmel_sha_update_dma_start()
829 dev_err(dd->dev, "dma_map_sg error\n"); in atmel_sha_update_dma_start()
830 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
833 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
835 count = ctx->bufcnt; in atmel_sha_update_dma_start()
836 ctx->bufcnt = 0; in atmel_sha_update_dma_start()
837 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), in atmel_sha_update_dma_start()
838 length, ctx->dma_addr, count, final); in atmel_sha_update_dma_start()
842 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { in atmel_sha_update_dma_start()
843 dev_err(dd->dev, "dma_map_sg error\n"); in atmel_sha_update_dma_start()
844 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_update_dma_start()
847 ctx->flags |= SHA_FLAGS_SG; in atmel_sha_update_dma_start()
850 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, in atmel_sha_update_dma_start()
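/*
 * Illustrative check (not driver code): the fast path above keeps only a
 * multiple of ctx->block_size for DMA ("tail = length & (block_size - 1)")
 * and defers the remainder to the buffered slow path via ctx->offset.
 * This relies on block sizes being powers of two:
 */
#include <assert.h>
#include <stddef.h>

static void split_for_dma(size_t length, size_t block_size,
			  size_t *dma_len, size_t *tail)
{
	*tail = length & (block_size - 1);	/* bytes the slow path buffers */
	*dma_len = length - *tail;		/* block-aligned DMA portion */
}

int main(void)
{
	size_t dma_len, tail;

	split_for_dma(200, 64, &dma_len, &tail);
	assert(dma_len == 192 && tail == 8);
	return 0;
}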
856 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); in atmel_sha_update_dma_stop()
858 if (ctx->flags & SHA_FLAGS_SG) { in atmel_sha_update_dma_stop()
859 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
860 if (ctx->sg->length == ctx->offset) { in atmel_sha_update_dma_stop()
861 ctx->sg = sg_next(ctx->sg); in atmel_sha_update_dma_stop()
862 if (ctx->sg) in atmel_sha_update_dma_stop()
863 ctx->offset = 0; in atmel_sha_update_dma_stop()
865 if (ctx->flags & SHA_FLAGS_PAD) { in atmel_sha_update_dma_stop()
866 dma_unmap_single(dd->dev, ctx->dma_addr, in atmel_sha_update_dma_stop()
867 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
870 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + in atmel_sha_update_dma_stop()
871 ctx->block_size, DMA_TO_DEVICE); in atmel_sha_update_dma_stop()
877 struct ahash_request *req = dd->req; in atmel_sha_update_req()
881 dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n", in atmel_sha_update_req()
882 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
884 if (ctx->flags & SHA_FLAGS_CPU) in atmel_sha_update_req()
890 dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n", in atmel_sha_update_req()
891 err, ctx->digcnt[1], ctx->digcnt[0]); in atmel_sha_update_req()
898 struct ahash_request *req = dd->req; in atmel_sha_final_req()
903 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { in atmel_sha_final_req()
905 count = ctx->bufcnt; in atmel_sha_final_req()
906 ctx->bufcnt = 0; in atmel_sha_final_req()
912 count = ctx->bufcnt; in atmel_sha_final_req()
913 ctx->bufcnt = 0; in atmel_sha_final_req()
914 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); in atmel_sha_final_req()
917 dev_dbg(dd->dev, "final_req: err: %d\n", err); in atmel_sha_final_req()
925 u32 *hash = (u32 *)ctx->digest; in atmel_sha_copy_hash()
928 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_copy_hash()
949 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); in atmel_sha_copy_hash()
950 ctx->flags |= SHA_FLAGS_RESTORE; in atmel_sha_copy_hash()
957 if (!req->result) in atmel_sha_copy_ready_hash()
960 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_copy_ready_hash()
963 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
967 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
971 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
975 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
979 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); in atmel_sha_copy_ready_hash()
987 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish()
989 if (ctx->digcnt[0] || ctx->digcnt[1]) in atmel_sha_finish()
992 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1], in atmel_sha_finish()
993 ctx->digcnt[0], ctx->bufcnt); in atmel_sha_finish()
1001 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_finish_req()
1005 if (SHA_FLAGS_FINAL & dd->flags) in atmel_sha_finish_req()
1008 ctx->flags |= SHA_FLAGS_ERROR; in atmel_sha_finish_req()
1019 err = clk_enable(dd->iclk); in atmel_sha_hw_init()
1023 if (!(SHA_FLAGS_INIT & dd->flags)) { in atmel_sha_hw_init()
1025 dd->flags |= SHA_FLAGS_INIT; in atmel_sha_hw_init()
1044 dd->hw_version = atmel_sha_get_version(dd); in atmel_sha_hw_version_init()
1046 dev_info(dd->dev, in atmel_sha_hw_version_init()
1047 "version: 0x%x\n", dd->hw_version); in atmel_sha_hw_version_init()
1049 clk_disable(dd->iclk); in atmel_sha_hw_version_init()
1063 spin_lock_irqsave(&dd->lock, flags); in atmel_sha_handle_queue()
1065 ret = ahash_enqueue_request(&dd->queue, req); in atmel_sha_handle_queue()
1067 if (SHA_FLAGS_BUSY & dd->flags) { in atmel_sha_handle_queue()
1068 spin_unlock_irqrestore(&dd->lock, flags); in atmel_sha_handle_queue()
1072 backlog = crypto_get_backlog(&dd->queue); in atmel_sha_handle_queue()
1073 async_req = crypto_dequeue_request(&dd->queue); in atmel_sha_handle_queue()
1075 dd->flags |= SHA_FLAGS_BUSY; in atmel_sha_handle_queue()
1077 spin_unlock_irqrestore(&dd->lock, flags); in atmel_sha_handle_queue()
1083 crypto_request_complete(backlog, -EINPROGRESS); in atmel_sha_handle_queue()
1085 ctx = crypto_tfm_ctx(async_req->tfm); in atmel_sha_handle_queue()
1087 dd->req = ahash_request_cast(async_req); in atmel_sha_handle_queue()
1088 start_async = (dd->req != req); in atmel_sha_handle_queue()
1089 dd->is_async = start_async; in atmel_sha_handle_queue()
1090 dd->force_complete = false; in atmel_sha_handle_queue()
1092 /* WARNING: ctx->start() MAY change dd->is_async. */ in atmel_sha_handle_queue()
1093 err = ctx->start(dd); in atmel_sha_handle_queue()
1101 struct ahash_request *req = dd->req; in atmel_sha_start()
1105 dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %u\n", in atmel_sha_start()
1106 ctx->op, req->nbytes); in atmel_sha_start()
1114 * -EINPROGRESS: the hardware is busy and the SHA driver will resume in atmel_sha_start()
1131 dd->resume = atmel_sha_done; in atmel_sha_start()
1132 if (ctx->op == SHA_OP_UPDATE) { in atmel_sha_start()
1134 if (!err && (ctx->flags & SHA_FLAGS_FINUP)) in atmel_sha_start()
1137 } else if (ctx->op == SHA_OP_FINAL) { in atmel_sha_start()
1145 dev_dbg(dd->dev, "exit, err: %d\n", err); in atmel_sha_start()
1153 struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); in atmel_sha_enqueue()
1154 struct atmel_sha_dev *dd = tctx->dd; in atmel_sha_enqueue()
1156 ctx->op = op; in atmel_sha_enqueue()
1165 if (!req->nbytes) in atmel_sha_update()
1168 ctx->total = req->nbytes; in atmel_sha_update()
1169 ctx->sg = req->src; in atmel_sha_update()
1170 ctx->offset = 0; in atmel_sha_update()
1172 if (ctx->flags & SHA_FLAGS_FINUP) { in atmel_sha_update()
1173 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) in atmel_sha_update()
1175 ctx->flags |= SHA_FLAGS_CPU; in atmel_sha_update()
1176 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { in atmel_sha_update()
1187 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_final()
1189 if (ctx->flags & SHA_FLAGS_ERROR) in atmel_sha_final()
1192 if (ctx->flags & SHA_FLAGS_PAD) in atmel_sha_final()
1204 ctx->flags |= SHA_FLAGS_FINUP; in atmel_sha_finup()
1207 if (err1 == -EINPROGRESS || in atmel_sha_finup()
1208 (err1 == -EBUSY && (ahash_request_flags(req) & in atmel_sha_finup()
1249 ctx->start = atmel_sha_start; in atmel_sha_cra_init()
1256 alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; in atmel_sha_alg_init()
1257 alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; in atmel_sha_alg_init()
1258 alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_ctx); in atmel_sha_alg_init()
1259 alg->halg.base.cra_module = THIS_MODULE; in atmel_sha_alg_init()
1260 alg->halg.base.cra_init = atmel_sha_cra_init; in atmel_sha_alg_init()
1262 alg->halg.statesize = sizeof(struct atmel_sha_reqctx); in atmel_sha_alg_init()
1264 alg->init = atmel_sha_init; in atmel_sha_alg_init()
1265 alg->update = atmel_sha_update; in atmel_sha_alg_init()
1266 alg->final = atmel_sha_final; in atmel_sha_alg_init()
1267 alg->finup = atmel_sha_finup; in atmel_sha_alg_init()
1268 alg->digest = atmel_sha_digest; in atmel_sha_alg_init()
1269 alg->export = atmel_sha_export; in atmel_sha_alg_init()
1270 alg->import = atmel_sha_import; in atmel_sha_alg_init()
1276 .halg.base.cra_driver_name = "atmel-sha1",
1283 .halg.base.cra_driver_name = "atmel-sha256",
1292 .halg.base.cra_driver_name = "atmel-sha224",
1301 .halg.base.cra_driver_name = "atmel-sha384",
1309 .halg.base.cra_driver_name = "atmel-sha512",
1328 if (SHA_FLAGS_CPU & dd->flags) { in atmel_sha_done()
1329 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { in atmel_sha_done()
1330 dd->flags &= ~SHA_FLAGS_OUTPUT_READY; in atmel_sha_done()
1333 } else if (SHA_FLAGS_DMA_READY & dd->flags) { in atmel_sha_done()
1334 if (SHA_FLAGS_DMA_ACTIVE & dd->flags) { in atmel_sha_done()
1335 dd->flags &= ~SHA_FLAGS_DMA_ACTIVE; in atmel_sha_done()
1338 if (SHA_FLAGS_OUTPUT_READY & dd->flags) { in atmel_sha_done()
1339 /* hash or semi-hash ready */ in atmel_sha_done()
1340 dd->flags &= ~(SHA_FLAGS_DMA_READY | in atmel_sha_done()
1343 if (err != -EINPROGRESS) in atmel_sha_done()
1351 atmel_sha_finish_req(dd->req, err); in atmel_sha_done()
1360 dd->is_async = true; in atmel_sha_done_task()
1361 (void)dd->resume(dd); in atmel_sha_done_task()
1372 if (SHA_FLAGS_BUSY & sha_dd->flags) { in atmel_sha_irq()
1373 sha_dd->flags |= SHA_FLAGS_OUTPUT_READY; in atmel_sha_irq()
1374 if (!(SHA_FLAGS_CPU & sha_dd->flags)) in atmel_sha_irq()
1375 sha_dd->flags |= SHA_FLAGS_DMA_READY; in atmel_sha_irq()
1376 tasklet_schedule(&sha_dd->done_task); in atmel_sha_irq()
1378 dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n"); in atmel_sha_irq()
1393 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_check_aligned()
1394 struct ahash_request *req = dd->req; in atmel_sha_dma_check_aligned()
1396 size_t bs = ctx->block_size; in atmel_sha_dma_check_aligned()
1400 if (!IS_ALIGNED(sg->offset, sizeof(u32))) in atmel_sha_dma_check_aligned()
1407 if (len <= sg->length) { in atmel_sha_dma_check_aligned()
1408 dma->nents = nents + 1; in atmel_sha_dma_check_aligned()
1409 dma->last_sg_length = sg->length; in atmel_sha_dma_check_aligned()
1410 sg->length = ALIGN(len, sizeof(u32)); in atmel_sha_dma_check_aligned()
1415 if (!IS_ALIGNED(sg->length, bs)) in atmel_sha_dma_check_aligned()
1418 len -= sg->length; in atmel_sha_dma_check_aligned()
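/*
 * Illustrative check (not driver code): atmel_sha_dma_check_aligned()
 * rounds the last segment up with ALIGN(len, sizeof(u32)) so the DMA
 * engine always transfers whole 32-bit words. The kernel's ALIGN() is
 * the usual power-of-two round-up:
 */
#include <assert.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	assert(ALIGN_UP(13, 4) == 16);	/* partial word rounds up */
	assert(ALIGN_UP(16, 4) == 16);	/* already aligned: unchanged */
	return 0;
}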
1427 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_callback2()
1431 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_callback2()
1433 sg = dma->sg; in atmel_sha_dma_callback2()
1434 for (nents = 0; nents < dma->nents - 1; ++nents) in atmel_sha_dma_callback2()
1436 sg->length = dma->last_sg_length; in atmel_sha_dma_callback2()
1438 dd->is_async = true; in atmel_sha_dma_callback2()
1439 (void)atmel_sha_wait_for_data_ready(dd, dd->resume); in atmel_sha_dma_callback2()
1445 atmel_sha_fn_t resume) in atmel_sha_dma_start() argument
1447 struct atmel_sha_dma *dma = &dd->dma_lch_in; in atmel_sha_dma_start()
1448 struct dma_slave_config *config = &dma->dma_conf; in atmel_sha_dma_start()
1449 struct dma_chan *chan = dma->chan; in atmel_sha_dma_start()
1455 dd->resume = resume; in atmel_sha_dma_start()
1458 * dma->nents has already been initialized by in atmel_sha_dma_start()
1461 dma->sg = src; in atmel_sha_dma_start()
1462 sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_start()
1464 err = -ENOMEM; in atmel_sha_dma_start()
1468 config->src_maxburst = 16; in atmel_sha_dma_start()
1469 config->dst_maxburst = 16; in atmel_sha_dma_start()
1474 desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV, in atmel_sha_dma_start()
1477 err = -ENOMEM; in atmel_sha_dma_start()
1481 desc->callback = atmel_sha_dma_callback2; in atmel_sha_dma_start()
1482 desc->callback_param = dd; in atmel_sha_dma_start()
1490 return -EINPROGRESS; in atmel_sha_dma_start()
1493 dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE); in atmel_sha_dma_start()
1503 struct ahash_request *req = dd->req; in atmel_sha_cpu_transfer()
1505 const u32 *words = (const u32 *)ctx->buffer; in atmel_sha_cpu_transfer()
1509 din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1; in atmel_sha_cpu_transfer()
1512 num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)); in atmel_sha_cpu_transfer()
1516 ctx->offset += ctx->bufcnt; in atmel_sha_cpu_transfer()
1517 ctx->total -= ctx->bufcnt; in atmel_sha_cpu_transfer()
1519 if (!ctx->total) in atmel_sha_cpu_transfer()
1524 * Fill ctx->buffer now with the next data to be written into in atmel_sha_cpu_transfer()
1530 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); in atmel_sha_cpu_transfer()
1531 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, in atmel_sha_cpu_transfer()
1532 ctx->offset, ctx->bufcnt, 0); in atmel_sha_cpu_transfer()
1538 dd->resume = atmel_sha_cpu_transfer; in atmel_sha_cpu_transfer()
1540 return -EINPROGRESS; in atmel_sha_cpu_transfer()
1544 if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY))) in atmel_sha_cpu_transfer()
1545 return dd->cpu_transfer_complete(dd); in atmel_sha_cpu_transfer()
1547 return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete); in atmel_sha_cpu_transfer()
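/*
 * Illustrative check (not driver code): the CPU path above pushes
 * ctx->buffer to IDATAR as 32-bit words, sizing the loop with
 * DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)) so a partial trailing word is
 * still written. DIV_ROUND_UP is the standard kernel macro:
 */
#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	assert(DIV_ROUND_UP(5, 4) == 2);	/* 5 bytes need 2 words */
	assert(DIV_ROUND_UP(8, 4) == 2);	/* exact multiple */
	return 0;
}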
1555 atmel_sha_fn_t resume) in atmel_sha_cpu_start() argument
1557 struct ahash_request *req = dd->req; in atmel_sha_cpu_start()
1561 return resume(dd); in atmel_sha_cpu_start()
1563 ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY); in atmel_sha_cpu_start()
1566 ctx->flags |= SHA_FLAGS_IDATAR0; in atmel_sha_cpu_start()
1569 ctx->flags |= SHA_FLAGS_WAIT_DATARDY; in atmel_sha_cpu_start()
1571 ctx->sg = sg; in atmel_sha_cpu_start()
1572 ctx->total = len; in atmel_sha_cpu_start()
1573 ctx->offset = 0; in atmel_sha_cpu_start()
1576 ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); in atmel_sha_cpu_start()
1577 scatterwalk_map_and_copy(ctx->buffer, ctx->sg, in atmel_sha_cpu_start()
1578 ctx->offset, ctx->bufcnt, 0); in atmel_sha_cpu_start()
1580 dd->cpu_transfer_complete = resume; in atmel_sha_cpu_start()
1587 atmel_sha_fn_t resume) in atmel_sha_cpu_hash() argument
1589 struct ahash_request *req = dd->req; in atmel_sha_cpu_hash()
1594 if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding)) in atmel_sha_cpu_hash()
1595 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_cpu_hash()
1597 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); in atmel_sha_cpu_hash()
1603 sg_init_one(&dd->tmp, data, datalen); in atmel_sha_cpu_hash()
1604 return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume); in atmel_sha_cpu_hash()
1624 kfree(hkey->keydup); in atmel_sha_hmac_key_release()
1634 if (keylen > sizeof(hkey->buffer)) { in atmel_sha_hmac_key_set()
1635 hkey->keydup = kmemdup(key, keylen, GFP_KERNEL); in atmel_sha_hmac_key_set()
1636 if (!hkey->keydup) in atmel_sha_hmac_key_set()
1637 return -ENOMEM; in atmel_sha_hmac_key_set()
1640 memcpy(hkey->buffer, key, keylen); in atmel_sha_hmac_key_set()
1643 hkey->valid = true; in atmel_sha_hmac_key_set()
1644 hkey->keylen = keylen; in atmel_sha_hmac_key_set()
1652 if (!hkey->valid) in atmel_sha_hmac_key_get()
1655 *keylen = hkey->keylen; in atmel_sha_hmac_key_get()
1656 *key = (hkey->keydup) ? hkey->keydup : hkey->buffer; in atmel_sha_hmac_key_get()
1667 atmel_sha_fn_t resume; member
1671 atmel_sha_fn_t resume);
1685 atmel_sha_fn_t resume) in atmel_sha_hmac_setup() argument
1687 struct ahash_request *req = dd->req; in atmel_sha_hmac_setup()
1695 hmac->resume = resume; in atmel_sha_hmac_setup()
1696 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_hmac_setup()
1698 ctx->block_size = SHA1_BLOCK_SIZE; in atmel_sha_hmac_setup()
1699 ctx->hash_size = SHA1_DIGEST_SIZE; in atmel_sha_hmac_setup()
1703 ctx->block_size = SHA224_BLOCK_SIZE; in atmel_sha_hmac_setup()
1704 ctx->hash_size = SHA256_DIGEST_SIZE; in atmel_sha_hmac_setup()
1708 ctx->block_size = SHA256_BLOCK_SIZE; in atmel_sha_hmac_setup()
1709 ctx->hash_size = SHA256_DIGEST_SIZE; in atmel_sha_hmac_setup()
1713 ctx->block_size = SHA384_BLOCK_SIZE; in atmel_sha_hmac_setup()
1714 ctx->hash_size = SHA512_DIGEST_SIZE; in atmel_sha_hmac_setup()
1718 ctx->block_size = SHA512_BLOCK_SIZE; in atmel_sha_hmac_setup()
1719 ctx->hash_size = SHA512_DIGEST_SIZE; in atmel_sha_hmac_setup()
1723 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_hmac_setup()
1725 bs = ctx->block_size; in atmel_sha_hmac_setup()
1727 if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen))) in atmel_sha_hmac_setup()
1728 return resume(dd); in atmel_sha_hmac_setup()
1735 memcpy((u8 *)hmac->ipad, key, keylen); in atmel_sha_hmac_setup()
1736 memset((u8 *)hmac->ipad + keylen, 0, bs - keylen); in atmel_sha_hmac_setup()
1749 struct ahash_request *req = dd->req; in atmel_sha_hmac_prehash_key_done()
1754 size_t bs = ctx->block_size; in atmel_sha_hmac_prehash_key_done()
1759 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_prehash_key_done()
1760 memset((u8 *)hmac->ipad + ds, 0, bs - ds); in atmel_sha_hmac_prehash_key_done()
1766 struct ahash_request *req = dd->req; in atmel_sha_hmac_compute_ipad_hash()
1770 size_t bs = ctx->block_size; in atmel_sha_hmac_compute_ipad_hash()
1773 unsafe_memcpy(hmac->opad, hmac->ipad, bs, in atmel_sha_hmac_compute_ipad_hash()
1774 "fortified memcpy causes -Wrestrict warning"); in atmel_sha_hmac_compute_ipad_hash()
1776 hmac->ipad[i] ^= 0x36363636; in atmel_sha_hmac_compute_ipad_hash()
1777 hmac->opad[i] ^= 0x5c5c5c5c; in atmel_sha_hmac_compute_ipad_hash()
1780 return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false, in atmel_sha_hmac_compute_ipad_hash()
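/*
 * Illustrative sketch (not driver code): the loop above derives the HMAC
 * pads word-wise (key ^ 0x36363636 for ipad, key ^ 0x5c5c5c5c for opad).
 * The byte-wise equivalent from RFC 2104, assuming the key was already
 * zero-padded (or pre-hashed) to the block size:
 */
#include <assert.h>
#include <stddef.h>

static void hmac_make_pads(const unsigned char *key, size_t bs,
			   unsigned char *ipad, unsigned char *opad)
{
	for (size_t i = 0; i < bs; i++) {
		ipad[i] = key[i] ^ 0x36;	/* inner pad */
		opad[i] = key[i] ^ 0x5c;	/* outer pad */
	}
}

int main(void)
{
	unsigned char key[64] = { 0 }, ipad[64], opad[64];

	hmac_make_pads(key, sizeof(key), ipad, opad);
	assert(ipad[0] == 0x36 && opad[0] == 0x5c);
	return 0;
}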
1786 struct ahash_request *req = dd->req; in atmel_sha_hmac_compute_opad_hash()
1790 size_t bs = ctx->block_size; in atmel_sha_hmac_compute_opad_hash()
1791 size_t hs = ctx->hash_size; in atmel_sha_hmac_compute_opad_hash()
1795 hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_compute_opad_hash()
1796 return atmel_sha_cpu_hash(dd, hmac->opad, bs, false, in atmel_sha_hmac_compute_opad_hash()
1802 struct ahash_request *req = dd->req; in atmel_sha_hmac_setup_done()
1806 size_t hs = ctx->hash_size; in atmel_sha_hmac_setup_done()
1810 hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_hmac_setup_done()
1811 atmel_sha_hmac_key_release(&hmac->hkey); in atmel_sha_hmac_setup_done()
1812 return hmac->resume(dd); in atmel_sha_hmac_setup_done()
1817 struct ahash_request *req = dd->req; in atmel_sha_hmac_start()
1825 switch (ctx->op) { in atmel_sha_hmac_start()
1831 dd->resume = atmel_sha_done; in atmel_sha_hmac_start()
1836 dd->resume = atmel_sha_hmac_final; in atmel_sha_hmac_start()
1845 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_hmac_start()
1856 return atmel_sha_hmac_key_set(&hmac->hkey, key, keylen); in atmel_sha_hmac_setkey()
1872 struct ahash_request *req = dd->req; in atmel_sha_hmac_init_done()
1876 size_t bs = ctx->block_size; in atmel_sha_hmac_init_done()
1877 size_t hs = ctx->hash_size; in atmel_sha_hmac_init_done()
1879 ctx->bufcnt = 0; in atmel_sha_hmac_init_done()
1880 ctx->digcnt[0] = bs; in atmel_sha_hmac_init_done()
1881 ctx->digcnt[1] = 0; in atmel_sha_hmac_init_done()
1882 ctx->flags |= SHA_FLAGS_RESTORE; in atmel_sha_hmac_init_done()
1883 memcpy(ctx->digest, hmac->ipad, hs); in atmel_sha_hmac_init_done()
1889 struct ahash_request *req = dd->req; in atmel_sha_hmac_final()
1893 u32 *digest = (u32 *)ctx->digest; in atmel_sha_hmac_final()
1895 size_t bs = ctx->block_size; in atmel_sha_hmac_final()
1896 size_t hs = ctx->hash_size; in atmel_sha_hmac_final()
1909 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_hmac_final()
1912 mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); in atmel_sha_hmac_final()
1918 sg_init_one(&dd->tmp, digest, ds); in atmel_sha_hmac_final()
1919 return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true, in atmel_sha_hmac_final()
1926 * req->result might not be sizeof(u32) aligned, so copy the in atmel_sha_hmac_final_done()
1927 * digest into ctx->digest[] before memcpy() the data into in atmel_sha_hmac_final_done()
1928 * req->result. in atmel_sha_hmac_final_done()
1930 atmel_sha_copy_hash(dd->req); in atmel_sha_hmac_final_done()
1931 atmel_sha_copy_ready_hash(dd->req); in atmel_sha_hmac_final_done()
1948 struct ahash_request *req = dd->req; in atmel_sha_hmac_digest2()
1953 size_t hs = ctx->hash_size; in atmel_sha_hmac_digest2()
1959 if (!req->nbytes) { in atmel_sha_hmac_digest2()
1960 req->nbytes = 0; in atmel_sha_hmac_digest2()
1961 ctx->bufcnt = 0; in atmel_sha_hmac_digest2()
1962 ctx->digcnt[0] = 0; in atmel_sha_hmac_digest2()
1963 ctx->digcnt[1] = 0; in atmel_sha_hmac_digest2()
1964 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_hmac_digest2()
1976 sg_init_one(&dd->tmp, ctx->buffer, ctx->bufcnt); in atmel_sha_hmac_digest2()
1980 if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD && in atmel_sha_hmac_digest2()
1981 atmel_sha_dma_check_aligned(dd, req->src, req->nbytes)) in atmel_sha_hmac_digest2()
1987 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); in atmel_sha_hmac_digest2()
1991 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_hmac_digest2()
1995 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; in atmel_sha_hmac_digest2()
2002 atmel_sha_write(dd, SHA_MSR, req->nbytes); in atmel_sha_hmac_digest2()
2003 atmel_sha_write(dd, SHA_BCR, req->nbytes); in atmel_sha_hmac_digest2()
2008 if (!req->nbytes) { in atmel_sha_hmac_digest2()
2009 sgbuf = &dd->tmp; in atmel_sha_hmac_digest2()
2010 req->nbytes = ctx->bufcnt; in atmel_sha_hmac_digest2()
2012 sgbuf = req->src; in atmel_sha_hmac_digest2()
2017 return atmel_sha_dma_start(dd, sgbuf, req->nbytes, in atmel_sha_hmac_digest2()
2020 return atmel_sha_cpu_start(dd, sgbuf, req->nbytes, false, true, in atmel_sha_hmac_digest2()
2030 hmac->base.start = atmel_sha_hmac_start; in atmel_sha_hmac_cra_init()
2031 atmel_sha_hmac_key_init(&hmac->hkey); in atmel_sha_hmac_cra_init()
2040 atmel_sha_hmac_key_release(&hmac->hkey); in atmel_sha_hmac_cra_exit()
2045 alg->halg.base.cra_priority = ATMEL_SHA_PRIORITY; in atmel_sha_hmac_alg_init()
2046 alg->halg.base.cra_flags = CRYPTO_ALG_ASYNC; in atmel_sha_hmac_alg_init()
2047 alg->halg.base.cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx); in atmel_sha_hmac_alg_init()
2048 alg->halg.base.cra_module = THIS_MODULE; in atmel_sha_hmac_alg_init()
2049 alg->halg.base.cra_init = atmel_sha_hmac_cra_init; in atmel_sha_hmac_alg_init()
2050 alg->halg.base.cra_exit = atmel_sha_hmac_cra_exit; in atmel_sha_hmac_alg_init()
2052 alg->halg.statesize = sizeof(struct atmel_sha_reqctx); in atmel_sha_hmac_alg_init()
2054 alg->init = atmel_sha_hmac_init; in atmel_sha_hmac_alg_init()
2055 alg->update = atmel_sha_update; in atmel_sha_hmac_alg_init()
2056 alg->final = atmel_sha_final; in atmel_sha_hmac_alg_init()
2057 alg->digest = atmel_sha_hmac_digest; in atmel_sha_hmac_alg_init()
2058 alg->setkey = atmel_sha_hmac_setkey; in atmel_sha_hmac_alg_init()
2059 alg->export = atmel_sha_export; in atmel_sha_hmac_alg_init()
2060 alg->import = atmel_sha_import; in atmel_sha_hmac_alg_init()
2066 .halg.base.cra_driver_name = "atmel-hmac-sha1",
2073 .halg.base.cra_driver_name = "atmel-hmac-sha224",
2080 .halg.base.cra_driver_name = "atmel-hmac-sha256",
2087 .halg.base.cra_driver_name = "atmel-hmac-sha384",
2094 .halg.base.cra_driver_name = "atmel-hmac-sha512",
2134 authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); in atmel_sha_authenc_complete()
2139 struct ahash_request *req = dd->req; in atmel_sha_authenc_start()
2144 * Force atmel_sha_complete() to call req->base.complete(), i.e. in atmel_sha_authenc_start()
2145 * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). in atmel_sha_authenc_start()
2147 dd->force_complete = true; in atmel_sha_authenc_start()
2150 return authctx->cb(authctx->aes_dev, err, dd->is_async); in atmel_sha_authenc_start()
2174 int err = -EINVAL; in atmel_sha_authenc_spawn()
2178 name = "atmel-hmac-sha1"; in atmel_sha_authenc_spawn()
2182 name = "atmel-hmac-sha224"; in atmel_sha_authenc_spawn()
2186 name = "atmel-hmac-sha256"; in atmel_sha_authenc_spawn()
2190 name = "atmel-hmac-sha384"; in atmel_sha_authenc_spawn()
2194 name = "atmel-hmac-sha512"; in atmel_sha_authenc_spawn()
2207 tctx->start = atmel_sha_authenc_start; in atmel_sha_authenc_spawn()
2208 tctx->flags = mode; in atmel_sha_authenc_spawn()
2212 err = -ENOMEM; in atmel_sha_authenc_spawn()
2215 auth->tfm = tfm; in atmel_sha_authenc_spawn()
2229 crypto_free_ahash(auth->tfm); in atmel_sha_authenc_free()
2237 struct crypto_ahash *tfm = auth->tfm; in atmel_sha_authenc_setkey()
2251 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_schedule()
2252 struct crypto_ahash *tfm = auth->tfm; in atmel_sha_authenc_schedule()
2262 return cb(aes_dev, -ENODEV, false); in atmel_sha_authenc_schedule()
2265 ctx->dd = dd; in atmel_sha_authenc_schedule()
2266 ctx->buflen = SHA_BUFFER_LEN; in atmel_sha_authenc_schedule()
2267 authctx->cb = cb; in atmel_sha_authenc_schedule()
2268 authctx->aes_dev = aes_dev; in atmel_sha_authenc_schedule()
2283 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_init()
2286 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_init()
2289 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_authenc_init()
2291 authctx->cb = cb; in atmel_sha_authenc_init()
2292 authctx->aes_dev = aes_dev; in atmel_sha_authenc_init()
2293 authctx->assoc = assoc; in atmel_sha_authenc_init()
2294 authctx->assoclen = assoclen; in atmel_sha_authenc_init()
2295 authctx->textlen = textlen; in atmel_sha_authenc_init()
2297 ctx->flags = hmac->base.flags; in atmel_sha_authenc_init()
2304 struct ahash_request *req = dd->req; in atmel_sha_authenc_init2()
2306 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_init2()
2309 size_t hs = ctx->hash_size; in atmel_sha_authenc_init2()
2315 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); in atmel_sha_authenc_init2()
2319 atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); in atmel_sha_authenc_init2()
2324 mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; in atmel_sha_authenc_init2()
2327 msg_size = authctx->assoclen + authctx->textlen; in atmel_sha_authenc_init2()
2334 return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, in atmel_sha_authenc_init2()
2341 struct ahash_request *req = dd->req; in atmel_sha_authenc_init_done()
2344 return authctx->cb(authctx->aes_dev, 0, dd->is_async); in atmel_sha_authenc_init_done()
2353 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_final()
2354 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_final()
2356 switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { in atmel_sha_authenc_final()
2358 authctx->digestlen = SHA1_DIGEST_SIZE; in atmel_sha_authenc_final()
2362 authctx->digestlen = SHA224_DIGEST_SIZE; in atmel_sha_authenc_final()
2366 authctx->digestlen = SHA256_DIGEST_SIZE; in atmel_sha_authenc_final()
2370 authctx->digestlen = SHA384_DIGEST_SIZE; in atmel_sha_authenc_final()
2374 authctx->digestlen = SHA512_DIGEST_SIZE; in atmel_sha_authenc_final()
2378 return atmel_sha_complete(dd, -EINVAL); in atmel_sha_authenc_final()
2380 if (authctx->digestlen > digestlen) in atmel_sha_authenc_final()
2381 authctx->digestlen = digestlen; in atmel_sha_authenc_final()
2383 authctx->cb = cb; in atmel_sha_authenc_final()
2384 authctx->aes_dev = aes_dev; in atmel_sha_authenc_final()
2385 authctx->digest = digest; in atmel_sha_authenc_final()
2393 struct ahash_request *req = dd->req; in atmel_sha_authenc_final_done()
2395 size_t i, num_words = authctx->digestlen / sizeof(u32); in atmel_sha_authenc_final_done()
2398 authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); in atmel_sha_authenc_final_done()
2406 struct atmel_sha_reqctx *ctx = &authctx->base; in atmel_sha_authenc_abort()
2407 struct atmel_sha_dev *dd = ctx->dd; in atmel_sha_authenc_abort()
2409 /* Prevent atmel_sha_complete() from calling req->base.complete(). */ in atmel_sha_authenc_abort()
2410 dd->is_async = false; in atmel_sha_authenc_abort()
2411 dd->force_complete = false; in atmel_sha_authenc_abort()
2423 if (dd->caps.has_hmac) in atmel_sha_unregister_algs()
2430 if (dd->caps.has_sha224) in atmel_sha_unregister_algs()
2433 if (dd->caps.has_sha_384_512) { in atmel_sha_unregister_algs()
2451 if (dd->caps.has_sha224) { in atmel_sha_register_algs()
2459 if (dd->caps.has_sha_384_512) { in atmel_sha_register_algs()
2469 if (dd->caps.has_hmac) { in atmel_sha_register_algs()
2501 dd->dma_lch_in.chan = dma_request_chan(dd->dev, "tx"); in atmel_sha_dma_init()
2502 if (IS_ERR(dd->dma_lch_in.chan)) { in atmel_sha_dma_init()
2503 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_lch_in.chan), in atmel_sha_dma_init()
2507 dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + in atmel_sha_dma_init()
2509 dd->dma_lch_in.dma_conf.src_maxburst = 1; in atmel_sha_dma_init()
2510 dd->dma_lch_in.dma_conf.src_addr_width = in atmel_sha_dma_init()
2512 dd->dma_lch_in.dma_conf.dst_maxburst = 1; in atmel_sha_dma_init()
2513 dd->dma_lch_in.dma_conf.dst_addr_width = in atmel_sha_dma_init()
2515 dd->dma_lch_in.dma_conf.device_fc = false; in atmel_sha_dma_init()
2522 dma_release_channel(dd->dma_lch_in.chan); in atmel_sha_dma_cleanup()
2528 dd->caps.has_dma = 0; in atmel_sha_get_cap()
2529 dd->caps.has_dualbuff = 0; in atmel_sha_get_cap()
2530 dd->caps.has_sha224 = 0; in atmel_sha_get_cap()
2531 dd->caps.has_sha_384_512 = 0; in atmel_sha_get_cap()
2532 dd->caps.has_uihv = 0; in atmel_sha_get_cap()
2533 dd->caps.has_hmac = 0; in atmel_sha_get_cap()
2536 switch (dd->hw_version & 0xff0) { in atmel_sha_get_cap()
2540 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2541 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2542 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2543 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2544 dd->caps.has_uihv = 1; in atmel_sha_get_cap()
2545 dd->caps.has_hmac = 1; in atmel_sha_get_cap()
2548 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2549 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2550 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2551 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2552 dd->caps.has_uihv = 1; in atmel_sha_get_cap()
2555 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2556 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2557 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2558 dd->caps.has_sha_384_512 = 1; in atmel_sha_get_cap()
2561 dd->caps.has_dma = 1; in atmel_sha_get_cap()
2562 dd->caps.has_dualbuff = 1; in atmel_sha_get_cap()
2563 dd->caps.has_sha224 = 1; in atmel_sha_get_cap()
2568 dev_warn(dd->dev, in atmel_sha_get_cap()
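/*
 * Illustrative sketch (not driver code): atmel_sha_get_cap() masks the IP
 * revision with 0xff0 and enables features per revision. The real case
 * values are elided from this excerpt, so 0x500/0x400 below are
 * hypothetical codes; the fall-through compacts the driver's per-case
 * lists, newer revisions being supersets of older ones:
 */
#include <stdbool.h>
#include <stdint.h>

struct sha_caps {
	bool has_dma, has_dualbuff, has_sha224, has_sha_384_512;
	bool has_uihv, has_hmac;
};

static void get_caps(struct sha_caps *caps, uint32_t hw_version)
{
	*caps = (struct sha_caps){ 0 };

	switch (hw_version & 0xff0) {
	case 0x500:			/* hypothetical newest revision */
		caps->has_uihv = true;
		caps->has_hmac = true;
		/* fall through */
	case 0x400:			/* hypothetical older revision */
		caps->has_dma = true;
		caps->has_dualbuff = true;
		caps->has_sha224 = true;
		caps->has_sha_384_512 = true;
		break;
	default:			/* unknown: leave all caps off */
		break;
	}
}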
2575 { .compatible = "atmel,at91sam9g46-sha" },
2584 struct device *dev = &pdev->dev; in atmel_sha_probe()
2588 sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); in atmel_sha_probe()
2590 return -ENOMEM; in atmel_sha_probe()
2592 sha_dd->dev = dev; in atmel_sha_probe()
2596 INIT_LIST_HEAD(&sha_dd->list); in atmel_sha_probe()
2597 spin_lock_init(&sha_dd->lock); in atmel_sha_probe()
2599 tasklet_init(&sha_dd->done_task, atmel_sha_done_task, in atmel_sha_probe()
2601 tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, in atmel_sha_probe()
2604 crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); in atmel_sha_probe()
2606 sha_dd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &sha_res); in atmel_sha_probe()
2607 if (IS_ERR(sha_dd->io_base)) { in atmel_sha_probe()
2608 err = PTR_ERR(sha_dd->io_base); in atmel_sha_probe()
2611 sha_dd->phys_base = sha_res->start; in atmel_sha_probe()
2614 sha_dd->irq = platform_get_irq(pdev, 0); in atmel_sha_probe()
2615 if (sha_dd->irq < 0) { in atmel_sha_probe()
2616 err = sha_dd->irq; in atmel_sha_probe()
2620 err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, in atmel_sha_probe()
2621 IRQF_SHARED, "atmel-sha", sha_dd); in atmel_sha_probe()
2628 sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); in atmel_sha_probe()
2629 if (IS_ERR(sha_dd->iclk)) { in atmel_sha_probe()
2631 err = PTR_ERR(sha_dd->iclk); in atmel_sha_probe()
2635 err = clk_prepare(sha_dd->iclk); in atmel_sha_probe()
2645 if (sha_dd->caps.has_dma) { in atmel_sha_probe()
2651 dma_chan_name(sha_dd->dma_lch_in.chan)); in atmel_sha_probe()
2655 list_add_tail(&sha_dd->list, &atmel_sha.dev_list); in atmel_sha_probe()
2663 sha_dd->caps.has_sha224 ? "/SHA224" : "", in atmel_sha_probe()
2664 sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : ""); in atmel_sha_probe()
2670 list_del(&sha_dd->list); in atmel_sha_probe()
2672 if (sha_dd->caps.has_dma) in atmel_sha_probe()
2675 clk_unprepare(sha_dd->iclk); in atmel_sha_probe()
2677 tasklet_kill(&sha_dd->queue_task); in atmel_sha_probe()
2678 tasklet_kill(&sha_dd->done_task); in atmel_sha_probe()
2688 list_del(&sha_dd->list); in atmel_sha_remove()
2693 tasklet_kill(&sha_dd->queue_task); in atmel_sha_remove()
2694 tasklet_kill(&sha_dd->done_task); in atmel_sha_remove()
2696 if (sha_dd->caps.has_dma) in atmel_sha_remove()
2699 clk_unprepare(sha_dd->iclk); in atmel_sha_remove()
2717 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique");