// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file is part of STM32 Crypto driver for Linux.
 *
 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
 */

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include <crypto/engine.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define HASH_CR			0x00
#define HASH_DIN		0x04
#define HASH_STR		0x08
#define HASH_IMR		0x20
#define HASH_SR			0x24
#define HASH_CSR(x)		(0x0F8 + ((x) * 0x04))
#define HASH_HREG(x)		(0x310 + ((x) * 0x04))
#define HASH_HWCFGR		0x3F0
#define HASH_VER		0x3F4
#define HASH_ID			0x3F8

/* Control Register */
#define HASH_CR_INIT		BIT(2)
#define HASH_CR_DMAE		BIT(3)
#define HASH_CR_DATATYPE_POS	4
#define HASH_CR_MODE		BIT(6)
#define HASH_CR_MDMAT		BIT(13)
#define HASH_CR_DMAA		BIT(14)
#define HASH_CR_LKEY		BIT(16)

#define HASH_CR_ALGO_SHA1	0x0
#define HASH_CR_ALGO_MD5	0x80
#define HASH_CR_ALGO_SHA224	0x40000
#define HASH_CR_ALGO_SHA256	0x40080
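/*
 * Note: the ALGO selection is split across HASH_CR. Bit 7 distinguishes
 * MD5 from SHA1 on all instances; SHA-2 capable instances add a second
 * ALGO bit at position 18, hence the two non-contiguous bit patterns for
 * SHA224/SHA256 above.
 */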

/* Interrupt */
#define HASH_DINIE		BIT(0)
#define HASH_DCIE		BIT(1)

/* Interrupt Mask */
#define HASH_MASK_CALC_COMPLETION	BIT(0)
#define HASH_MASK_DATA_INPUT		BIT(1)

/* Context swap register */
#define HASH_CSR_REGISTER_NUMBER	53

/* Status Flags */
#define HASH_SR_DATA_INPUT_READY	BIT(0)
#define HASH_SR_OUTPUT_READY		BIT(1)
#define HASH_SR_DMA_ACTIVE		BIT(2)
#define HASH_SR_BUSY			BIT(3)

/* STR Register */
#define HASH_STR_NBLW_MASK	GENMASK(4, 0)
#define HASH_STR_DCAL		BIT(8)

#define HASH_FLAGS_INIT		BIT(0)
#define HASH_FLAGS_OUTPUT_READY	BIT(1)
#define HASH_FLAGS_CPU		BIT(2)
#define HASH_FLAGS_DMA_READY	BIT(3)
#define HASH_FLAGS_DMA_ACTIVE	BIT(4)
#define HASH_FLAGS_HMAC_INIT	BIT(5)
#define HASH_FLAGS_HMAC_FINAL	BIT(6)
#define HASH_FLAGS_HMAC_KEY	BIT(7)

#define HASH_FLAGS_FINAL	BIT(15)
#define HASH_FLAGS_FINUP	BIT(16)
#define HASH_FLAGS_ALGO_MASK	GENMASK(21, 18)
#define HASH_FLAGS_MD5		BIT(18)
#define HASH_FLAGS_SHA1		BIT(19)
#define HASH_FLAGS_SHA224	BIT(20)
#define HASH_FLAGS_SHA256	BIT(21)
#define HASH_FLAGS_ERRORS	BIT(22)
#define HASH_FLAGS_HMAC		BIT(23)

#define HASH_OP_UPDATE		1
#define HASH_OP_FINAL		2

enum stm32_hash_data_format {
	HASH_DATA_32_BITS	= 0x0,
	HASH_DATA_16_BITS	= 0x1,
	HASH_DATA_8_BITS	= 0x2,
	HASH_DATA_1_BIT		= 0x3
};
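/*
 * DATATYPE configures how the IP swaps each 32-bit word written to HASH_DIN
 * before processing (no swap, half-word, byte or bit swap). This driver
 * feeds byte streams, so requests are set up with HASH_DATA_8_BITS.
 */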

#define HASH_BUFLEN		256
#define HASH_LONG_KEY		64
#define HASH_MAX_KEY_SIZE	(SHA256_BLOCK_SIZE * 8)
#define HASH_QUEUE_LENGTH	16
#define HASH_DMA_THRESHOLD	50

#define HASH_AUTOSUSPEND_DELAY	50
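
/*
 * Requests up to HASH_DMA_THRESHOLD bytes are always fed by the CPU; larger
 * ones may use DMA when the scatterlist layout allows it (see
 * stm32_hash_dma_aligned_data()). HASH_BUFLEN bounds the CPU-mode staging
 * buffer held in each request context.
 */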

struct stm32_hash_ctx {
	struct crypto_engine_ctx enginectx;
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;

	u8			key[HASH_MAX_KEY_SIZE];
	int			keylen;
};

struct stm32_hash_request_ctx {
	struct stm32_hash_dev	*hdev;
	unsigned long		flags;
	unsigned long		op;

	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* DMA */
	struct scatterlist	*sg;
	unsigned int		offset;
	unsigned int		total;
	struct scatterlist	sg_key;

	dma_addr_t		dma_addr;
	size_t			dma_ct;
	int			nents;

	u8			data_type;

	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));

	/* Export Context */
	u32			*hw_context;
};

struct stm32_hash_algs_info {
	struct ahash_alg	*algs_list;
	size_t			size;
};

struct stm32_hash_pdata {
	struct stm32_hash_algs_info	*algs_info;
	size_t				algs_info_size;
};

struct stm32_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*clk;
	struct reset_control	*rst;
	void __iomem		*io_base;
	phys_addr_t		phys_base;
	u32			dma_mode;
	u32			dma_maxburst;

	struct ahash_request	*req;
	struct crypto_engine	*engine;

	int			err;
	unsigned long		flags;

	struct dma_chan		*dma_lch;
	struct completion	dma_completion;

	const struct stm32_hash_pdata	*pdata;
};
struct stm32_hash_drv {
	struct list_head	dev_list;
	spinlock_t		lock; /* protects dev_list */
};

static struct stm32_hash_drv stm32_hash = {
	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
};

static void stm32_hash_dma_callback(void *param);

static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
				    u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
	u32 status;

	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
					  !(status & HASH_SR_BUSY), 10, 10000);
}

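/*
 * NBLW holds the number of valid bits in the last 32-bit word written to
 * HASH_DIN before a digest calculation: 8 * (length % 4) marks the trailing
 * bytes of a non word-aligned message as valid (0 means all 32 bits count).
 */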
static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
{
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_STR);
	reg &= ~(HASH_STR_NBLW_MASK);
	reg |= 8U * (length % 4U);
	stm32_hash_write(hdev, HASH_STR, reg);
}

static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	u32 reg;
	int keylen = ctx->keylen;
	void *key = ctx->key;

	if (keylen) {
		stm32_hash_set_nblw(hdev, keylen);

		while (keylen > 0) {
			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
			keylen -= 4;
			key += 4;
		}

		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);

		return -EINPROGRESS;
	}

	return 0;
}

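/*
 * Program HASH_CR once per request: algorithm, data type and, for HMAC,
 * mode and long-key selection. DCIE is armed in IMR so that digest
 * completion raises the interrupt handled by the threaded IRQ.
 */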
static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	u32 reg = HASH_CR_INIT;

	if (!(hdev->flags & HASH_FLAGS_INIT)) {
		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
		case HASH_FLAGS_MD5:
			reg |= HASH_CR_ALGO_MD5;
			break;
		case HASH_FLAGS_SHA1:
			reg |= HASH_CR_ALGO_SHA1;
			break;
		case HASH_FLAGS_SHA224:
			reg |= HASH_CR_ALGO_SHA224;
			break;
		case HASH_FLAGS_SHA256:
			reg |= HASH_CR_ALGO_SHA256;
			break;
		default:
			reg |= HASH_CR_ALGO_MD5;
		}

		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);

		if (rctx->flags & HASH_FLAGS_HMAC) {
			hdev->flags |= HASH_FLAGS_HMAC;
			reg |= HASH_CR_MODE;
			if (ctx->keylen > HASH_LONG_KEY)
				reg |= HASH_CR_LKEY;
		}

		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);

		stm32_hash_write(hdev, HASH_CR, reg);

		hdev->flags |= HASH_FLAGS_INIT;

		dev_dbg(hdev->dev, "Write Control %x\n", reg);
	}
}

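/*
 * Copy as much request data as fits from the current scatterlist into the
 * per-request staging buffer, advancing sg/offset/total along the way.
 */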
static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
{
	size_t count;

	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
		count = min(rctx->sg->length - rctx->offset, rctx->total);
		count = min(count, rctx->buflen - rctx->bufcnt);

		if (!count) {
			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
				rctx->sg = sg_next(rctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
					 rctx->offset, count, 0);

		rctx->bufcnt += count;
		rctx->offset += count;
		rctx->total -= count;

		if (rctx->offset == rctx->sg->length) {
			rctx->sg = sg_next(rctx->sg);
			if (rctx->sg)
				rctx->offset = 0;
			else
				rctx->total = 0;
		}
	}
}

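/*
 * CPU-mode transmit: push the buffer into HASH_DIN word by word, writing
 * the HMAC key first when required, and trigger the digest with DCAL when
 * this is the final chunk.
 */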
static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
			       const u8 *buf, size_t length, int final)
{
	unsigned int count, len32;
	const u32 *buffer = (const u32 *)buf;
	u32 reg;

	if (final)
		hdev->flags |= HASH_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dev_dbg(hdev->dev, "%s: length: %zu, final: %x len32 %i\n",
		__func__, length, final, len32);

	hdev->flags |= HASH_FLAGS_CPU;

	stm32_hash_write_ctrl(hdev);

	if (stm32_hash_wait_busy(hdev))
		return -ETIMEDOUT;

	if ((hdev->flags & HASH_FLAGS_HMAC) &&
	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
		hdev->flags |= HASH_FLAGS_HMAC_KEY;
		stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	}

	for (count = 0; count < len32; count++)
		stm32_hash_write(hdev, HASH_DIN, buffer[count]);

	if (final) {
		stm32_hash_set_nblw(hdev, length);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		if (hdev->flags & HASH_FLAGS_HMAC) {
			if (stm32_hash_wait_busy(hdev))
				return -ETIMEDOUT;
			stm32_hash_write_key(hdev);
		}
		return -EINPROGRESS;
	}

	return 0;
}

static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	int bufcnt, err = 0, final;

	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);

	final = (rctx->flags & HASH_FLAGS_FINUP);

	while ((rctx->total >= rctx->buflen) ||
	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
		stm32_hash_append_sg(rctx);
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
	}

	stm32_hash_append_sg(rctx);

	if (final) {
		bufcnt = rctx->bufcnt;
		rctx->bufcnt = 0;
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
					  (rctx->flags & HASH_FLAGS_FINUP));
	}

	return err;
}

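/*
 * DMA-mode transmit of one scatterlist entry. MDMAT marks the transfer as
 * one of several, so intermediate chunks do not trigger a digest
 * calculation; it is cleared for the last chunk.
 */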
static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
			       struct scatterlist *sg, int length, int mdma)
{
	struct dma_async_tx_descriptor *in_desc;
	dma_cookie_t cookie;
	u32 reg;
	int err;

	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
					  DMA_CTRL_ACK);
	if (!in_desc) {
		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
		return -ENOMEM;
	}

	reinit_completion(&hdev->dma_completion);
	in_desc->callback = stm32_hash_dma_callback;
	in_desc->callback_param = hdev;

	hdev->flags |= HASH_FLAGS_FINAL;
	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;

	reg = stm32_hash_read(hdev, HASH_CR);

	if (mdma)
		reg |= HASH_CR_MDMAT;
	else
		reg &= ~HASH_CR_MDMAT;

	reg |= HASH_CR_DMAE;

	stm32_hash_write(hdev, HASH_CR, reg);

	stm32_hash_set_nblw(hdev, length);

	cookie = dmaengine_submit(in_desc);
	err = dma_submit_error(cookie);
	if (err)
		return -ENOMEM;

	dma_async_issue_pending(hdev->dma_lch);

	if (!wait_for_completion_timeout(&hdev->dma_completion,
					 msecs_to_jiffies(100)))
		err = -ETIMEDOUT;

	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
				     NULL, NULL) != DMA_COMPLETE)
		err = -ETIMEDOUT;

	if (err) {
		dev_err(hdev->dev, "DMA Error %i\n", err);
		dmaengine_terminate_all(hdev->dma_lch);
		return err;
	}

	return -EINPROGRESS;
}

static void stm32_hash_dma_callback(void *param)
{
	struct stm32_hash_dev *hdev = param;

	complete(&hdev->dma_completion);

	hdev->flags |= HASH_FLAGS_DMA_READY;
}

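/*
 * Feed the HMAC key to the IP: short keys (or IPs in dma_mode 1) are
 * written by the CPU, longer ones are mapped and sent over the DMA channel.
 */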
static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	int err;

	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
		err = stm32_hash_write_key(hdev);
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
	} else {
		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
			sg_init_one(&rctx->sg_key, ctx->key,
				    ALIGN(ctx->keylen, sizeof(u32)));

		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);

		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
	}

	return err;
}

static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	struct dma_chan *chan;
	int err;

	memset(&dma_conf, 0, sizeof(dma_conf));

	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.src_maxburst = hdev->dma_maxburst;
	dma_conf.dst_maxburst = hdev->dma_maxburst;
	dma_conf.device_fc = false;

	chan = dma_request_chan(hdev->dev, "in");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	hdev->dma_lch = chan;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dma_release_channel(hdev->dma_lch);
		hdev->dma_lch = NULL;
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		return err;
	}

	init_completion(&hdev->dma_completion);

	return 0;
}

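/*
 * Walk the request scatterlist and stream it to the IP over DMA. In
 * dma_mode 1 the last entry is shortened to a 16-byte multiple; the
 * remaining bytes (ncp) are pushed by the CPU once DMA is disabled, and
 * the digest is then kicked off manually via DCAL.
 */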
static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
	struct scatterlist sg[1], *tsg;
	int err = 0, len = 0, reg, ncp = 0;
	unsigned int i;
	u32 *buffer = (void *)rctx->buffer;

	rctx->sg = hdev->req->src;
	rctx->total = hdev->req->nbytes;

	rctx->nents = sg_nents(rctx->sg);

	if (rctx->nents < 0)
		return -EINVAL;

	stm32_hash_write_ctrl(hdev);

	if (hdev->flags & HASH_FLAGS_HMAC) {
		err = stm32_hash_hmac_dma_send(hdev);
		if (err != -EINPROGRESS)
			return err;
	}

	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
		/* Work on a local copy: the entry may be trimmed below */
		sg[0] = *tsg;
		len = sg->length;

		if (sg_is_last(sg)) {
			if (hdev->dma_mode == 1) {
				len = (ALIGN(sg->length, 16) - 16);

				ncp = sg_pcopy_to_buffer(
					rctx->sg, rctx->nents,
					rctx->buffer, sg->length - len,
					rctx->total - sg->length + len);

				sg->length = len;
			} else {
				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
					len = sg->length;
					sg->length = ALIGN(sg->length,
							   sizeof(u32));
				}
			}
		}

		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
					  DMA_TO_DEVICE);
		if (rctx->dma_ct == 0) {
			dev_err(hdev->dev, "dma_map_sg error\n");
			return -ENOMEM;
		}

		err = stm32_hash_xmit_dma(hdev, sg, len,
					  !sg_is_last(sg));

		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);

		if (err == -ENOMEM)
			return err;
	}

	if (hdev->dma_mode == 1) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		reg = stm32_hash_read(hdev, HASH_CR);
		reg &= ~HASH_CR_DMAE;
		reg |= HASH_CR_DMAA;
		stm32_hash_write(hdev, HASH_CR, reg);

		if (ncp) {
			/* Zero-pad the ncp trailing bytes up to a full word */
			memset((u8 *)buffer + ncp, 0,
			       ALIGN(ncp, sizeof(u32)) - ncp);
			writesl(hdev->io_base + HASH_DIN, buffer,
				DIV_ROUND_UP(ncp, sizeof(u32)));
		}
		stm32_hash_set_nblw(hdev, ncp);
		reg = stm32_hash_read(hdev, HASH_STR);
		reg |= HASH_STR_DCAL;
		stm32_hash_write(hdev, HASH_STR, reg);
		err = -EINPROGRESS;
	}

	if (hdev->flags & HASH_FLAGS_HMAC) {
		if (stm32_hash_wait_busy(hdev))
			return -ETIMEDOUT;
		err = stm32_hash_hmac_dma_send(hdev);
	}

	return err;
}

static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
{
	struct stm32_hash_dev *hdev = NULL, *tmp;

	spin_lock_bh(&stm32_hash.lock);
	if (!ctx->hdev) {
		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		ctx->hdev = hdev;
	} else {
		hdev = ctx->hdev;
	}

	spin_unlock_bh(&stm32_hash.lock);

	return hdev;
}

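/*
 * A request is DMA-eligible only above the size threshold and with a
 * word-aligned first buffer offset; multi-entry scatterlists are rejected
 * outright in dma_mode 1, and otherwise every non-final entry length must
 * be word-aligned.
 */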
static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
{
	struct scatterlist *sg;
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int i;

	if (req->nbytes <= HASH_DMA_THRESHOLD)
		return false;

	if (sg_nents(req->src) > 1) {
		if (hdev->dma_mode == 1)
			return false;
		for_each_sg(req->src, sg, sg_nents(req->src), i) {
			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
			    (!sg_is_last(sg)))
				return false;
		}
	}

	if (req->src->offset % 4)
		return false;

	return true;
}

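/*
 * ahash init: the algorithm is inferred from the transform's digest size,
 * so each supported digest size maps to exactly one hardware algorithm.
 */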
static int stm32_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);

	rctx->hdev = hdev;

	rctx->flags = HASH_FLAGS_CPU;

	rctx->digcnt = crypto_ahash_digestsize(tfm);
	switch (rctx->digcnt) {
	case MD5_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_MD5;
		break;
	case SHA1_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA1;
		break;
	case SHA224_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA224;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->flags |= HASH_FLAGS_SHA256;
		break;
	default:
		return -EINVAL;
	}

	rctx->bufcnt = 0;
	rctx->buflen = HASH_BUFLEN;
	rctx->total = 0;
	rctx->offset = 0;
	rctx->data_type = HASH_DATA_8_BITS;

	memset(rctx->buffer, 0, HASH_BUFLEN);

	if (ctx->flags & HASH_FLAGS_HMAC)
		rctx->flags |= HASH_FLAGS_HMAC;

	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);

	return 0;
}

static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
{
	return stm32_hash_update_cpu(hdev);
}

static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	int err;
	int buflen = rctx->bufcnt;

	rctx->bufcnt = 0;

	if (!(rctx->flags & HASH_FLAGS_CPU))
		err = stm32_hash_dma_send(hdev);
	else
		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);

	return err;
}

static void stm32_hash_copy_hash(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	__be32 *hash = (void *)rctx->digest;
	unsigned int i, hashsize;

	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
	case HASH_FLAGS_MD5:
		hashsize = MD5_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA224:
		hashsize = SHA224_DIGEST_SIZE;
		break;
	case HASH_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;
	default:
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); i++)
		hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
						      HASH_HREG(i)));
}

static int stm32_hash_finish(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, rctx->digest, rctx->digcnt);

	return 0;
}

static void stm32_hash_finish_req(struct ahash_request *req, int err)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_dev *hdev = rctx->hdev;

	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
		stm32_hash_copy_hash(req);
		err = stm32_hash_finish(req);
		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
				 HASH_FLAGS_HMAC_KEY);
	} else {
		rctx->flags |= HASH_FLAGS_ERRORS;
	}

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	crypto_finalize_hash_request(hdev->engine, req, err);
}

static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
			      struct stm32_hash_request_ctx *rctx)
{
	pm_runtime_get_sync(hdev->dev);

	if (!(HASH_FLAGS_INIT & hdev->flags)) {
		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
		stm32_hash_write(hdev, HASH_STR, 0);
		stm32_hash_write(hdev, HASH_DIN, 0);
		stm32_hash_write(hdev, HASH_IMR, 0);
		hdev->err = 0;
	}

	return 0;
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);

static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
				   struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
}

static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %u\n",
		rctx->op, req->nbytes);

	return stm32_hash_hw_init(hdev, rctx);
}

static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
{
	struct ahash_request *req = container_of(areq, struct ahash_request,
						 base);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	struct stm32_hash_request_ctx *rctx;
	int err = 0;

	if (!hdev)
		return -ENODEV;

	hdev->req = req;

	rctx = ahash_request_ctx(req);

	if (rctx->op == HASH_OP_UPDATE)
		err = stm32_hash_update_req(hdev);
	else if (rctx->op == HASH_OP_FINAL)
		err = stm32_hash_final_req(hdev);

	if (err != -EINPROGRESS)
		/* done task will not finish it, so do it here */
		stm32_hash_finish_req(req, err);

	return 0;
}

static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct stm32_hash_dev *hdev = ctx->hdev;

	rctx->op = op;

	return stm32_hash_handle_queue(hdev, req);
}

static int stm32_hash_update(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
		return 0;

	rctx->total = req->nbytes;
	rctx->sg = req->src;
	rctx->offset = 0;

	if (rctx->bufcnt + rctx->total < rctx->buflen) {
		stm32_hash_append_sg(rctx);
		return 0;
	}

	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
}

static int stm32_hash_final(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);

	rctx->flags |= HASH_FLAGS_FINUP;

	return stm32_hash_enqueue(req, HASH_OP_FINAL);
}

static int stm32_hash_finup(struct ahash_request *req)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	int err1, err2;

	rctx->flags |= HASH_FLAGS_FINUP;

	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
		rctx->flags &= ~HASH_FLAGS_CPU;

	err1 = stm32_hash_update(req);

	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() always has to be called to clean up resources even if
	 * update() failed, except when update() returned -EINPROGRESS.
	 */
	err2 = stm32_hash_final(req);

	return err1 ?: err2;
}

static int stm32_hash_digest(struct ahash_request *req)
{
	return stm32_hash_init(req) ?: stm32_hash_finup(req);
}

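/*
 * export/import snapshot the live hardware state (IMR, STR, CR plus the
 * HASH_CSR context-swap registers) alongside the request context, so a
 * partial hash can be suspended and resumed later. import frees the
 * buffer allocated by export.
 */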
static int stm32_hash_export(struct ahash_request *req, void *out)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	u32 *preg;
	unsigned int i;

	pm_runtime_get_sync(hdev->dev);

	while (stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY)
		cpu_relax();

	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
					 sizeof(u32),
					 GFP_KERNEL);
	if (!rctx->hw_context) {
		pm_runtime_mark_last_busy(hdev->dev);
		pm_runtime_put_autosuspend(hdev->dev);
		return -ENOMEM;
	}

	preg = rctx->hw_context;

	*preg++ = stm32_hash_read(hdev, HASH_IMR);
	*preg++ = stm32_hash_read(hdev, HASH_STR);
	*preg++ = stm32_hash_read(hdev, HASH_CR);
	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	memcpy(out, rctx, sizeof(*rctx));

	return 0;
}

static int stm32_hash_import(struct ahash_request *req, const void *in)
{
	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
	const u32 *preg = in;
	u32 reg;
	unsigned int i;

	memcpy(rctx, in, sizeof(*rctx));

	preg = rctx->hw_context;

	pm_runtime_get_sync(hdev->dev);

	stm32_hash_write(hdev, HASH_IMR, *preg++);
	stm32_hash_write(hdev, HASH_STR, *preg++);
	stm32_hash_write(hdev, HASH_CR, *preg);
	/* Rewrite CR with INIT set before restoring the context registers */
	reg = *preg++ | HASH_CR_INIT;
	stm32_hash_write(hdev, HASH_CR, reg);

	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
		stm32_hash_write(hdev, HASH_CSR(i), *preg++);

	pm_runtime_mark_last_busy(hdev->dev);
	pm_runtime_put_autosuspend(hdev->dev);

	kfree(rctx->hw_context);

	return 0;
}

static int stm32_hash_setkey(struct crypto_ahash *tfm,
			     const u8 *key, unsigned int keylen)
{
	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	if (keylen <= HASH_MAX_KEY_SIZE) {
		memcpy(ctx->key, key, keylen);
		ctx->keylen = keylen;
	} else {
		return -ENOMEM;
	}

	return 0;
}

static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
				    const char *algs_hmac_name)
{
	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct stm32_hash_request_ctx));

	ctx->keylen = 0;

	if (algs_hmac_name)
		ctx->flags |= HASH_FLAGS_HMAC;

	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}

static int stm32_hash_cra_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, NULL);
}

static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "md5");
}

static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha1");
}

static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha224");
}

static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
{
	return stm32_hash_cra_init_algs(tfm, "sha256");
}

static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;

	if (HASH_FLAGS_CPU & hdev->flags) {
		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
			goto finish;
		}
	}

	return IRQ_HANDLED;

finish:
	/* Finish current request */
	stm32_hash_finish_req(hdev->req, 0);

	return IRQ_HANDLED;
}

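/*
 * Hard IRQ half: acknowledge the digest-complete status, mask further
 * interrupts and let the threaded handler finalize the request.
 */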
static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
{
	struct stm32_hash_dev *hdev = dev_id;
	u32 reg;

	reg = stm32_hash_read(hdev, HASH_SR);
	if (reg & HASH_SR_OUTPUT_READY) {
		reg &= ~HASH_SR_OUTPUT_READY;
		stm32_hash_write(hdev, HASH_SR, reg);
		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
		/* Disable interrupts */
		stm32_hash_write(hdev, HASH_IMR, 0);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static struct ahash_alg algs_md5_sha1[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "stm32-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "stm32-hmac-md5",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_md5_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "stm32-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "stm32-hmac-sha1",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha1_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static struct ahash_alg algs_sha224_sha256[] = {
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "stm32-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.setkey = stm32_hash_setkey,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "stm32-hmac-sha224",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha224_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "stm32-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = stm32_hash_init,
		.update = stm32_hash_update,
		.final = stm32_hash_final,
		.finup = stm32_hash_finup,
		.digest = stm32_hash_digest,
		.export = stm32_hash_export,
		.import = stm32_hash_import,
		.setkey = stm32_hash_setkey,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.statesize = sizeof(struct stm32_hash_request_ctx),
			.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "stm32-hmac-sha256",
				.cra_priority = 200,
				.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
				.cra_alignmask = 3,
				.cra_init = stm32_hash_cra_sha256_init,
				.cra_module = THIS_MODULE,
			}
		}
	},
};

static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
			err = crypto_register_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
			if (err)
				goto err_algs;
		}
	}

	return 0;
err_algs:
	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
	/* Unwind: the partial list i first, then every fully registered list */
	while (j--)
		crypto_unregister_ahash(
			&hdev->pdata->algs_info[i].algs_list[j]);
	while (i--) {
		for (j = hdev->pdata->algs_info[i].size; j--;)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return err;
}

static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
{
	unsigned int i, j;

	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
			crypto_unregister_ahash(
				&hdev->pdata->algs_info[i].algs_list[j]);
	}

	return 0;
}

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5_sha1,
		.size		= ARRAY_SIZE(algs_md5_sha1),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
};

static const struct of_device_id stm32_hash_of_match[] = {
	{
		.compatible = "st,stm32f456-hash",
		.data = &stm32_hash_pdata_stm32f4,
	},
	{
		.compatible = "st,stm32f756-hash",
		.data = &stm32_hash_pdata_stm32f7,
	},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	if (of_property_read_u32(dev->of_node, "dma-maxburst",
				 &hdev->dma_maxburst)) {
		dev_info(dev, "dma-maxburst not specified, using 0\n");
		hdev->dma_maxburst = 0;
	}

	return 0;
}

static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdev->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
					stm32_hash_irq_thread, IRQF_ONESHOT,
					dev_name(dev), hdev);
	if (ret) {
		dev_err(dev, "Cannot grab IRQ\n");
		return ret;
	}

	hdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
		dev_dbg(dev, "DMA mode not available\n");
		break;
	default:
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

static int stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	int ret;

	hdev = platform_get_drvdata(pdev);
	if (!hdev)
		return -ENODEV;

	ret = pm_runtime_resume_and_get(hdev->dev);
	if (ret < 0)
		return ret;

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm	= &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");