1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2017 Marvell
4 *
5 * Antoine Tenart <antoine.tenart@free-electrons.com>
6 */
7
8 #include <crypto/hmac.h>
9 #include <crypto/md5.h>
10 #include <crypto/sha.h>
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14
15 #include "safexcel.h"
16
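/*
 * Per-transform (tfm) context. @alg selects the engine hash algorithm for
 * the context control word, and @ipad/@opad hold the precomputed HMAC
 * inner/outer hash states filled in by the setkey path.
 */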
17 struct safexcel_ahash_ctx {
18 struct safexcel_context base;
19 struct safexcel_crypto_priv *priv;
20
21 u32 alg;
22
23 u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
24 u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
25 };
26
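/*
 * Per-request state. @len and @processed are two-word counters ([0] low
 * part plus [1] overflow part) of the total bytes accepted and the bytes
 * already sent to the engine. @state holds the (intermediate) digest,
 * @cache holds data not yet sent to the engine, and @cache_next stages the
 * data to be cached for the next send() call.
 */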
27 struct safexcel_ahash_req {
28 bool last_req;
29 bool finish;
30 bool hmac;
31 bool needs_inv;
32
33 int nents;
34 dma_addr_t result_dma;
35
36 u32 digest;
37
38 u8 state_sz; /* expected state size, only set once */
39 u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
40
41 u64 len[2];
42 u64 processed[2];
43
44 u8 cache[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
45 dma_addr_t cache_dma;
46 unsigned int cache_sz;
47
48 u8 cache_next[SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
49 };
50
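/* Return the number of bytes accepted for this request but not yet sent to
 * the engine, reconstructed from the two-word len/processed counters.
 */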
51 static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
52 {
53 u64 len, processed;
54
55 len = (0xffffffff * req->len[1]) + req->len[0];
56 processed = (0xffffffff * req->processed[1]) + req->processed[0];
57
58 return len - processed;
59 }
60
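/* Build the two instruction tokens for a hash operation: hash @input_length
 * bytes of input data, then insert the @result_length byte digest into the
 * result.
 */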
61 static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
62 u32 input_length, u32 result_length)
63 {
64 struct safexcel_token *token =
65 (struct safexcel_token *)cdesc->control_data.token;
66
67 token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
68 token[0].packet_length = input_length;
69 token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
70 token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
71
72 token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
73 token[1].packet_length = result_length;
74 token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
75 EIP197_TOKEN_STAT_LAST_PACKET;
76 token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
77 EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
78 }
79
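/* Program the context control words in the first command descriptor: digest
 * type, hash algorithm, context size and whether to restart or continue the
 * hash. For a continued request the saved intermediate digest and block
 * counter are copied into the context record; for HMAC the precomputed
 * ipad/opad states are loaded instead.
 */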
80 static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
81 struct safexcel_ahash_req *req,
82 struct safexcel_command_desc *cdesc,
83 unsigned int digestsize)
84 {
85 struct safexcel_crypto_priv *priv = ctx->priv;
86 int i;
87
88 cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
89 cdesc->control_data.control0 |= ctx->alg;
90 cdesc->control_data.control0 |= req->digest;
91
92 if (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
93 if (req->processed[0] || req->processed[1]) {
94 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
95 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(5);
96 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
97 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
98 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
99 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
100 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
101 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384 ||
102 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
103 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(17);
104
105 cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
106 } else {
107 cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
108 }
109
110 if (!req->finish)
111 cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
112
113 /*
114 * Copy the input digest if needed, and set up the context
115 * fields. Do this now as we need it to setup the first command
116 * descriptor.
117 */
118 if (req->processed[0] || req->processed[1]) {
119 for (i = 0; i < digestsize / sizeof(u32); i++)
120 ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
121
122 if (req->finish) {
123 u64 count = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
124 count += ((0xffffffff / EIP197_COUNTER_BLOCK_SIZE) *
125 req->processed[1]);
126
127 /* This is a hardware limitation, as the
128 * counter must fit into a u32. This represents
129 * a fairly big amount of input data, so we
130 * shouldn't see this.
131 */
132 if (unlikely(count & 0xffff0000)) {
133 dev_warn(priv->dev,
134 "Input data is too big\n");
135 return;
136 }
137
138 ctx->base.ctxr->data[i] = cpu_to_le32(count);
139 }
140 }
141 } else if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
142 cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(2 * req->state_sz / sizeof(u32));
143
144 memcpy(ctx->base.ctxr->data, ctx->ipad, req->state_sz);
145 memcpy(ctx->base.ctxr->data + req->state_sz / sizeof(u32),
146 ctx->opad, req->state_sz);
147 }
148 }
149
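/* Completion handler for a regular hash request: check the result
 * descriptor, unmap the DMA buffers, copy the digest to the request result
 * on a final request and keep any bytes staged in cache_next for the next
 * send() call.
 */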
150 static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
151 struct crypto_async_request *async,
152 bool *should_complete, int *ret)
153 {
154 struct safexcel_result_desc *rdesc;
155 struct ahash_request *areq = ahash_request_cast(async);
156 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
157 struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
158 u64 cache_len;
159
160 *ret = 0;
161
162 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
163 if (IS_ERR(rdesc)) {
164 dev_err(priv->dev,
165 "hash: result: could not retrieve the result descriptor\n");
166 *ret = PTR_ERR(rdesc);
167 } else {
168 *ret = safexcel_rdesc_check_errors(priv, rdesc);
169 }
170
171 safexcel_complete(priv, ring);
172
173 if (sreq->nents) {
174 dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
175 sreq->nents = 0;
176 }
177
178 if (sreq->result_dma) {
179 dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
180 DMA_FROM_DEVICE);
181 sreq->result_dma = 0;
182 }
183
184 if (sreq->cache_dma) {
185 dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
186 DMA_TO_DEVICE);
187 sreq->cache_dma = 0;
188 }
189
190 if (sreq->finish)
191 memcpy(areq->result, sreq->state,
192 crypto_ahash_digestsize(ahash));
193
194 cache_len = safexcel_queued_len(sreq);
195 if (cache_len)
196 memcpy(sreq->cache, sreq->cache_next, cache_len);
197
198 *should_complete = true;
199
200 return 1;
201 }
202
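/* Build the descriptors for one hash request: a first command descriptor
 * for previously cached data (if any), one per source scatterlist entry for
 * the new data, and a single result descriptor pointing at req->state.
 * Unless this is the last request, data that does not fill a whole block is
 * held back in cache_next instead of being sent.
 */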
203 static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
204 int *commands, int *results)
205 {
206 struct ahash_request *areq = ahash_request_cast(async);
207 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
208 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
209 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
210 struct safexcel_crypto_priv *priv = ctx->priv;
211 struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
212 struct safexcel_result_desc *rdesc;
213 struct scatterlist *sg;
214 int i, extra, n_cdesc = 0, ret = 0;
215 u64 queued, len, cache_len;
216
217 queued = len = safexcel_queued_len(req);
218 if (queued <= crypto_ahash_blocksize(ahash))
219 cache_len = queued;
220 else
221 cache_len = queued - areq->nbytes;
222
223 if (!req->last_req) {
224 /* If this is not the last request and the queued data does not
225 * fit into full blocks, cache it for the next send() call.
226 */
227 extra = queued & (crypto_ahash_blocksize(ahash) - 1);
228 if (!extra)
229 /* If this is not the last request and the queued data
230 * is a multiple of a block, cache the last one for now.
231 */
232 extra = crypto_ahash_blocksize(ahash);
233
234 if (extra) {
235 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
236 req->cache_next, extra,
237 areq->nbytes - extra);
238
239 queued -= extra;
240 len -= extra;
241
242 if (!queued) {
243 *commands = 0;
244 *results = 0;
245 return 0;
246 }
247 }
248 }
249
250 /* Add a command descriptor for the cached data, if any */
251 if (cache_len) {
252 req->cache_dma = dma_map_single(priv->dev, req->cache,
253 cache_len, DMA_TO_DEVICE);
254 if (dma_mapping_error(priv->dev, req->cache_dma))
255 return -EINVAL;
256
257 req->cache_sz = cache_len;
258 first_cdesc = safexcel_add_cdesc(priv, ring, 1,
259 (cache_len == len),
260 req->cache_dma, cache_len, len,
261 ctx->base.ctxr_dma);
262 if (IS_ERR(first_cdesc)) {
263 ret = PTR_ERR(first_cdesc);
264 goto unmap_cache;
265 }
266 n_cdesc++;
267
268 queued -= cache_len;
269 if (!queued)
270 goto send_command;
271 }
272
273 /* Now handle the current ahash request buffer(s) */
274 req->nents = dma_map_sg(priv->dev, areq->src,
275 sg_nents_for_len(areq->src, areq->nbytes),
276 DMA_TO_DEVICE);
277 if (!req->nents) {
278 ret = -ENOMEM;
279 goto cdesc_rollback;
280 }
281
282 for_each_sg(areq->src, sg, req->nents, i) {
283 int sglen = sg_dma_len(sg);
284
285 /* Do not overflow the request */
286 if (queued < sglen)
287 sglen = queued;
288
289 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
290 !(queued - sglen), sg_dma_address(sg),
291 sglen, len, ctx->base.ctxr_dma);
292 if (IS_ERR(cdesc)) {
293 ret = PTR_ERR(cdesc);
294 goto unmap_sg;
295 }
296 n_cdesc++;
297
298 if (n_cdesc == 1)
299 first_cdesc = cdesc;
300
301 queued -= sglen;
302 if (!queued)
303 break;
304 }
305
306 send_command:
307 /* Setup the context options */
308 safexcel_context_control(ctx, req, first_cdesc, req->state_sz);
309
310 /* Add the token */
311 safexcel_hash_token(first_cdesc, len, req->state_sz);
312
313 req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
314 DMA_FROM_DEVICE);
315 if (dma_mapping_error(priv->dev, req->result_dma)) {
316 ret = -EINVAL;
317 goto unmap_sg;
318 }
319
320 /* Add a result descriptor */
321 rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
322 req->state_sz);
323 if (IS_ERR(rdesc)) {
324 ret = PTR_ERR(rdesc);
325 goto unmap_result;
326 }
327
328 safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
329
330 req->processed[0] += len;
331 if (req->processed[0] < len)
332 req->processed[1]++;
333
334 *commands = n_cdesc;
335 *results = 1;
336 return 0;
337
338 unmap_result:
339 dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
340 DMA_FROM_DEVICE);
341 unmap_sg:
342 dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
343 cdesc_rollback:
344 for (i = 0; i < n_cdesc; i++)
345 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
346 unmap_cache:
347 if (req->cache_dma) {
348 dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
349 DMA_TO_DEVICE);
350 req->cache_sz = 0;
351 }
352
353 return ret;
354 }
355
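/* Check whether the context record in memory still matches the request
 * state (intermediate digest and processed-block counter). If not, the copy
 * cached by the engine has to be invalidated before it can be reused.
 */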
356 static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
357 {
358 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
359 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
360 unsigned int state_w_sz = req->state_sz / sizeof(u32);
361 u64 processed;
362 int i;
363
364 processed = req->processed[0] / EIP197_COUNTER_BLOCK_SIZE;
365 processed += (0xffffffff / EIP197_COUNTER_BLOCK_SIZE) * req->processed[1];
366
367 for (i = 0; i < state_w_sz; i++)
368 if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
369 return true;
370
371 if (ctx->base.ctxr->data[state_w_sz] != cpu_to_le32(processed))
372 return true;
373
374 return false;
375 }
376
377 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
378 int ring,
379 struct crypto_async_request *async,
380 bool *should_complete, int *ret)
381 {
382 struct safexcel_result_desc *rdesc;
383 struct ahash_request *areq = ahash_request_cast(async);
384 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
385 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
386 int enq_ret;
387
388 *ret = 0;
389
390 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
391 if (IS_ERR(rdesc)) {
392 dev_err(priv->dev,
393 "hash: invalidate: could not retrieve the result descriptor\n");
394 *ret = PTR_ERR(rdesc);
395 } else {
396 *ret = safexcel_rdesc_check_errors(priv, rdesc);
397 }
398
399 safexcel_complete(priv, ring);
400
401 if (ctx->base.exit_inv) {
402 dma_pool_free(priv->context_pool, ctx->base.ctxr,
403 ctx->base.ctxr_dma);
404
405 *should_complete = true;
406 return 1;
407 }
408
409 ring = safexcel_select_ring(priv);
410 ctx->base.ring = ring;
411
412 spin_lock_bh(&priv->ring[ring].queue_lock);
413 enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
414 spin_unlock_bh(&priv->ring[ring].queue_lock);
415
416 if (enq_ret != -EINPROGRESS)
417 *ret = enq_ret;
418
419 queue_work(priv->ring[ring].workqueue,
420 &priv->ring[ring].work_data.work);
421
422 *should_complete = false;
423
424 return 1;
425 }
426
427 static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
428 struct crypto_async_request *async,
429 bool *should_complete, int *ret)
430 {
431 struct ahash_request *areq = ahash_request_cast(async);
432 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
433 int err;
434
435 BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && req->needs_inv);
436
437 if (req->needs_inv) {
438 req->needs_inv = false;
439 err = safexcel_handle_inv_result(priv, ring, async,
440 should_complete, ret);
441 } else {
442 err = safexcel_handle_req_result(priv, ring, async,
443 should_complete, ret);
444 }
445
446 return err;
447 }
448
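/* Queue a single command descriptor invalidating this context record. */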
449 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
450 int ring, int *commands, int *results)
451 {
452 struct ahash_request *areq = ahash_request_cast(async);
453 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
454 int ret;
455
456 ret = safexcel_invalidate_cache(async, ctx->priv,
457 ctx->base.ctxr_dma, ring);
458 if (unlikely(ret))
459 return ret;
460
461 *commands = 1;
462 *results = 1;
463
464 return 0;
465 }
466
467 static int safexcel_ahash_send(struct crypto_async_request *async,
468 int ring, int *commands, int *results)
469 {
470 struct ahash_request *areq = ahash_request_cast(async);
471 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
472 int ret;
473
474 if (req->needs_inv)
475 ret = safexcel_ahash_send_inv(async, ring, commands, results);
476 else
477 ret = safexcel_ahash_send_req(async, ring, commands, results);
478
479 return ret;
480 }
481
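/* Synchronously invalidate the context record: build an invalidation
 * request on the stack, enqueue it on the context's ring and wait for its
 * completion.
 */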
482 static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
483 {
484 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
485 struct safexcel_crypto_priv *priv = ctx->priv;
486 EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
487 struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
488 struct safexcel_inv_result result = {};
489 int ring = ctx->base.ring;
490
491 memset(req, 0, EIP197_AHASH_REQ_SIZE);
492
493 /* create invalidation request */
494 init_completion(&result.completion);
495 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
496 safexcel_inv_complete, &result);
497
498 ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
499 ctx = crypto_tfm_ctx(req->base.tfm);
500 ctx->base.exit_inv = true;
501 rctx->needs_inv = true;
502
503 spin_lock_bh(&priv->ring[ring].queue_lock);
504 crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
505 spin_unlock_bh(&priv->ring[ring].queue_lock);
506
507 queue_work(priv->ring[ring].workqueue,
508 &priv->ring[ring].work_data.work);
509
510 wait_for_completion(&result.completion);
511
512 if (result.error) {
513 dev_warn(priv->dev, "hash: completion error (%d)\n",
514 result.error);
515 return result.error;
516 }
517
518 return 0;
519 }
520
521 /* safexcel_ahash_cache: cache data until at least one request can be sent to
522 * the engine, i.e. when at least one full block size worth of data is queued.
523 */
524 static int safexcel_ahash_cache(struct ahash_request *areq)
525 {
526 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
527 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
528 u64 queued, cache_len;
529
530 /* queued: everything accepted by the driver which will be handled by
531 * the next send() calls.
532 * tot sz handled by update() - tot sz handled by send()
533 */
534 queued = safexcel_queued_len(req);
535 /* cache_len: everything accepted by the driver but not sent yet,
536 * tot sz handled by update() - last req sz - tot sz handled by send()
537 */
538 cache_len = queued - areq->nbytes;
539
540 /*
541 * In case there aren't enough bytes to proceed (less than a
542 * block size), cache the data until we have enough.
543 */
544 if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
545 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
546 req->cache + cache_len,
547 areq->nbytes, 0);
548 return areq->nbytes;
549 }
550
551 /* We couldn't cache all the data */
552 return -E2BIG;
553 }
554
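/* Enqueue the request on the context's ring. The DMA context record is
 * allocated on first use; if an existing record no longer matches the
 * request state, an invalidation is scheduled first.
 */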
555 static int safexcel_ahash_enqueue(struct ahash_request *areq)
556 {
557 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
558 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
559 struct safexcel_crypto_priv *priv = ctx->priv;
560 int ret, ring;
561
562 req->needs_inv = false;
563
564 if (ctx->base.ctxr) {
565 if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
566 (req->processed[0] || req->processed[1]) &&
567 req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
568 /* We're still setting needs_inv here, even though it is
569 * cleared right away, because the needs_inv flag can be
570 * set in other functions and we want to keep the same
571 * logic.
572 */
573 ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
574
575 if (ctx->base.needs_inv) {
576 ctx->base.needs_inv = false;
577 req->needs_inv = true;
578 }
579 } else {
580 ctx->base.ring = safexcel_select_ring(priv);
581 ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
582 EIP197_GFP_FLAGS(areq->base),
583 &ctx->base.ctxr_dma);
584 if (!ctx->base.ctxr)
585 return -ENOMEM;
586 }
587
588 ring = ctx->base.ring;
589
590 spin_lock_bh(&priv->ring[ring].queue_lock);
591 ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
592 spin_unlock_bh(&priv->ring[ring].queue_lock);
593
594 queue_work(priv->ring[ring].workqueue,
595 &priv->ring[ring].work_data.work);
596
597 return ret;
598 }
599
600 static int safexcel_ahash_update(struct ahash_request *areq)
601 {
602 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
603 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
604
605 /* If the request is 0 length, do nothing */
606 if (!areq->nbytes)
607 return 0;
608
609 req->len[0] += areq->nbytes;
610 if (req->len[0] < areq->nbytes)
611 req->len[1]++;
612
613 safexcel_ahash_cache(areq);
614
615 /*
616 * We're not doing partial updates when performing an HMAC request.
617 * Everything will be handled by the final() call.
618 */
619 if (req->digest == CONTEXT_CONTROL_DIGEST_HMAC)
620 return 0;
621
622 if (req->hmac)
623 return safexcel_ahash_enqueue(areq);
624
625 if (!req->last_req &&
626 safexcel_queued_len(req) > crypto_ahash_blocksize(ahash))
627 return safexcel_ahash_enqueue(areq);
628
629 return 0;
630 }
631
632 static int safexcel_ahash_final(struct ahash_request *areq)
633 {
634 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
635 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
636
637 req->last_req = true;
638 req->finish = true;
639
640 /* If we have an overall 0 length request */
641 if (!req->len[0] && !req->len[1] && !areq->nbytes) {
642 if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5)
643 memcpy(areq->result, md5_zero_message_hash,
644 MD5_DIGEST_SIZE);
645 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
646 memcpy(areq->result, sha1_zero_message_hash,
647 SHA1_DIGEST_SIZE);
648 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
649 memcpy(areq->result, sha224_zero_message_hash,
650 SHA224_DIGEST_SIZE);
651 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
652 memcpy(areq->result, sha256_zero_message_hash,
653 SHA256_DIGEST_SIZE);
654 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA384)
655 memcpy(areq->result, sha384_zero_message_hash,
656 SHA384_DIGEST_SIZE);
657 else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
658 memcpy(areq->result, sha512_zero_message_hash,
659 SHA512_DIGEST_SIZE);
660
661 return 0;
662 }
663
664 return safexcel_ahash_enqueue(areq);
665 }
666
667 static int safexcel_ahash_finup(struct ahash_request *areq)
668 {
669 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
670
671 req->last_req = true;
672 req->finish = true;
673
674 safexcel_ahash_update(areq);
675 return safexcel_ahash_final(areq);
676 }
677
678 static int safexcel_ahash_export(struct ahash_request *areq, void *out)
679 {
680 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
681 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
682 struct safexcel_ahash_export_state *export = out;
683
684 export->len[0] = req->len[0];
685 export->len[1] = req->len[1];
686 export->processed[0] = req->processed[0];
687 export->processed[1] = req->processed[1];
688
689 export->digest = req->digest;
690
691 memcpy(export->state, req->state, req->state_sz);
692 memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
693
694 return 0;
695 }
696
697 static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
698 {
699 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
700 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
701 const struct safexcel_ahash_export_state *export = in;
702 int ret;
703
704 ret = crypto_ahash_init(areq);
705 if (ret)
706 return ret;
707
708 req->len[0] = export->len[0];
709 req->len[1] = export->len[1];
710 req->processed[0] = export->processed[0];
711 req->processed[1] = export->processed[1];
712
713 req->digest = export->digest;
714
715 memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
716 memcpy(req->state, export->state, req->state_sz);
717
718 return 0;
719 }
720
721 static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
722 {
723 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
724 struct safexcel_alg_template *tmpl =
725 container_of(__crypto_ahash_alg(tfm->__crt_alg),
726 struct safexcel_alg_template, alg.ahash);
727
728 ctx->priv = tmpl->priv;
729 ctx->base.send = safexcel_ahash_send;
730 ctx->base.handle_result = safexcel_handle_result;
731
732 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
733 sizeof(struct safexcel_ahash_req));
734 return 0;
735 }
736
737 static int safexcel_sha1_init(struct ahash_request *areq)
738 {
739 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
740 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
741
742 memset(req, 0, sizeof(*req));
743
744 req->state[0] = SHA1_H0;
745 req->state[1] = SHA1_H1;
746 req->state[2] = SHA1_H2;
747 req->state[3] = SHA1_H3;
748 req->state[4] = SHA1_H4;
749
750 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
751 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
752 req->state_sz = SHA1_DIGEST_SIZE;
753
754 return 0;
755 }
756
757 static int safexcel_sha1_digest(struct ahash_request *areq)
758 {
759 int ret = safexcel_sha1_init(areq);
760
761 if (ret)
762 return ret;
763
764 return safexcel_ahash_finup(areq);
765 }
766
767 static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
768 {
769 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
770 struct safexcel_crypto_priv *priv = ctx->priv;
771 int ret;
772
773 /* context not allocated, skip invalidation */
774 if (!ctx->base.ctxr)
775 return;
776
777 if (priv->flags & EIP197_TRC_CACHE) {
778 ret = safexcel_ahash_exit_inv(tfm);
779 if (ret)
780 dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
781 } else {
782 dma_pool_free(priv->context_pool, ctx->base.ctxr,
783 ctx->base.ctxr_dma);
784 }
785 }
786
787 struct safexcel_alg_template safexcel_alg_sha1 = {
788 .type = SAFEXCEL_ALG_TYPE_AHASH,
789 .engines = EIP97IES | EIP197B | EIP197D,
790 .alg.ahash = {
791 .init = safexcel_sha1_init,
792 .update = safexcel_ahash_update,
793 .final = safexcel_ahash_final,
794 .finup = safexcel_ahash_finup,
795 .digest = safexcel_sha1_digest,
796 .export = safexcel_ahash_export,
797 .import = safexcel_ahash_import,
798 .halg = {
799 .digestsize = SHA1_DIGEST_SIZE,
800 .statesize = sizeof(struct safexcel_ahash_export_state),
801 .base = {
802 .cra_name = "sha1",
803 .cra_driver_name = "safexcel-sha1",
804 .cra_priority = 300,
805 .cra_flags = CRYPTO_ALG_ASYNC |
806 CRYPTO_ALG_KERN_DRIVER_ONLY,
807 .cra_blocksize = SHA1_BLOCK_SIZE,
808 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
809 .cra_init = safexcel_ahash_cra_init,
810 .cra_exit = safexcel_ahash_cra_exit,
811 .cra_module = THIS_MODULE,
812 },
813 },
814 },
815 };
816
817 static int safexcel_hmac_sha1_init(struct ahash_request *areq)
818 {
819 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
820
821 safexcel_sha1_init(areq);
822 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
823 return 0;
824 }
825
826 static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
827 {
828 int ret = safexcel_hmac_sha1_init(areq);
829
830 if (ret)
831 return ret;
832
833 return safexcel_ahash_finup(areq);
834 }
835
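/* Completion bookkeeping for the synchronous hashing performed while
 * precomputing the HMAC ipad/opad states in the setkey path.
 */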
836 struct safexcel_ahash_result {
837 struct completion completion;
838 int error;
839 };
840
841 static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
842 {
843 struct safexcel_ahash_result *result = req->data;
844
845 if (error == -EINPROGRESS)
846 return;
847
848 result->error = error;
849 complete(&result->completion);
850 }
851
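/* Derive the HMAC inner and outer pad blocks from the key, as described in
 * RFC 2104: a key longer than the block size is hashed first, then the key
 * is zero-padded to the block size and XORed with the ipad/opad constants.
 */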
852 static int safexcel_hmac_init_pad(struct ahash_request *areq,
853 unsigned int blocksize, const u8 *key,
854 unsigned int keylen, u8 *ipad, u8 *opad)
855 {
856 struct safexcel_ahash_result result;
857 struct scatterlist sg;
858 int ret, i;
859 u8 *keydup;
860
861 if (keylen <= blocksize) {
862 memcpy(ipad, key, keylen);
863 } else {
864 keydup = kmemdup(key, keylen, GFP_KERNEL);
865 if (!keydup)
866 return -ENOMEM;
867
868 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
869 safexcel_ahash_complete, &result);
870 sg_init_one(&sg, keydup, keylen);
871 ahash_request_set_crypt(areq, &sg, ipad, keylen);
872 init_completion(&result.completion);
873
874 ret = crypto_ahash_digest(areq);
875 if (ret == -EINPROGRESS || ret == -EBUSY) {
876 wait_for_completion_interruptible(&result.completion);
877 ret = result.error;
878 }
879
880 /* Avoid leaking */
881 memzero_explicit(keydup, keylen);
882 kfree(keydup);
883
884 if (ret)
885 return ret;
886
887 keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
888 }
889
890 memset(ipad + keylen, 0, blocksize - keylen);
891 memcpy(opad, ipad, blocksize);
892
893 for (i = 0; i < blocksize; i++) {
894 ipad[i] ^= HMAC_IPAD_VALUE;
895 opad[i] ^= HMAC_OPAD_VALUE;
896 }
897
898 return 0;
899 }
900
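/* Hash one block of ipad or opad data and export the resulting intermediate
 * state, which becomes the precomputed inner/outer digest used for HMAC.
 */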
901 static int safexcel_hmac_init_iv(struct ahash_request *areq,
902 unsigned int blocksize, u8 *pad, void *state)
903 {
904 struct safexcel_ahash_result result;
905 struct safexcel_ahash_req *req;
906 struct scatterlist sg;
907 int ret;
908
909 ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
910 safexcel_ahash_complete, &result);
911 sg_init_one(&sg, pad, blocksize);
912 ahash_request_set_crypt(areq, &sg, pad, blocksize);
913 init_completion(&result.completion);
914
915 ret = crypto_ahash_init(areq);
916 if (ret)
917 return ret;
918
919 req = ahash_request_ctx(areq);
920 req->hmac = true;
921 req->last_req = true;
922
923 ret = crypto_ahash_update(areq);
924 if (ret && ret != -EINPROGRESS && ret != -EBUSY)
925 return ret;
926
927 wait_for_completion_interruptible(&result.completion);
928 if (result.error)
929 return result.error;
930
931 return crypto_ahash_export(areq, state);
932 }
933
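/* Precompute the HMAC inner (istate) and outer (ostate) hash states for
 * @key using the ahash transform named by @alg (the driver's own
 * implementations, e.g. "safexcel-sha1").
 */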
934 int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
935 void *istate, void *ostate)
936 {
937 struct ahash_request *areq;
938 struct crypto_ahash *tfm;
939 unsigned int blocksize;
940 u8 *ipad, *opad;
941 int ret;
942
943 tfm = crypto_alloc_ahash(alg, 0, 0);
944 if (IS_ERR(tfm))
945 return PTR_ERR(tfm);
946
947 areq = ahash_request_alloc(tfm, GFP_KERNEL);
948 if (!areq) {
949 ret = -ENOMEM;
950 goto free_ahash;
951 }
952
953 crypto_ahash_clear_flags(tfm, ~0);
954 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
955
956 ipad = kcalloc(2, blocksize, GFP_KERNEL);
957 if (!ipad) {
958 ret = -ENOMEM;
959 goto free_request;
960 }
961
962 opad = ipad + blocksize;
963
964 ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
965 if (ret)
966 goto free_ipad;
967
968 ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
969 if (ret)
970 goto free_ipad;
971
972 ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
973
974 free_ipad:
975 kfree(ipad);
976 free_request:
977 ahash_request_free(areq);
978 free_ahash:
979 crypto_free_ahash(tfm);
980
981 return ret;
982 }
983
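/* Common HMAC setkey helper: precompute the inner/outer states, flag the
 * context for invalidation when they changed while a context record already
 * exists (and the engine caches records), then store them in ctx->ipad/opad.
 */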
984 static int safexcel_hmac_alg_setkey(struct crypto_ahash *tfm, const u8 *key,
985 unsigned int keylen, const char *alg,
986 unsigned int state_sz)
987 {
988 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
989 struct safexcel_crypto_priv *priv = ctx->priv;
990 struct safexcel_ahash_export_state istate, ostate;
991 int ret, i;
992
993 ret = safexcel_hmac_setkey(alg, key, keylen, &istate, &ostate);
994 if (ret)
995 return ret;
996
997 if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr) {
998 for (i = 0; i < state_sz / sizeof(u32); i++) {
999 if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
1000 ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
1001 ctx->base.needs_inv = true;
1002 break;
1003 }
1004 }
1005 }
1006
1007 memcpy(ctx->ipad, &istate.state, state_sz);
1008 memcpy(ctx->opad, &ostate.state, state_sz);
1009
1010 return 0;
1011 }
1012
1013 static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
1014 unsigned int keylen)
1015 {
1016 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha1",
1017 SHA1_DIGEST_SIZE);
1018 }
1019
1020 struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
1021 .type = SAFEXCEL_ALG_TYPE_AHASH,
1022 .engines = EIP97IES | EIP197B | EIP197D,
1023 .alg.ahash = {
1024 .init = safexcel_hmac_sha1_init,
1025 .update = safexcel_ahash_update,
1026 .final = safexcel_ahash_final,
1027 .finup = safexcel_ahash_finup,
1028 .digest = safexcel_hmac_sha1_digest,
1029 .setkey = safexcel_hmac_sha1_setkey,
1030 .export = safexcel_ahash_export,
1031 .import = safexcel_ahash_import,
1032 .halg = {
1033 .digestsize = SHA1_DIGEST_SIZE,
1034 .statesize = sizeof(struct safexcel_ahash_export_state),
1035 .base = {
1036 .cra_name = "hmac(sha1)",
1037 .cra_driver_name = "safexcel-hmac-sha1",
1038 .cra_priority = 300,
1039 .cra_flags = CRYPTO_ALG_ASYNC |
1040 CRYPTO_ALG_KERN_DRIVER_ONLY,
1041 .cra_blocksize = SHA1_BLOCK_SIZE,
1042 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1043 .cra_init = safexcel_ahash_cra_init,
1044 .cra_exit = safexcel_ahash_cra_exit,
1045 .cra_module = THIS_MODULE,
1046 },
1047 },
1048 },
1049 };
1050
1051 static int safexcel_sha256_init(struct ahash_request *areq)
1052 {
1053 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1054 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1055
1056 memset(req, 0, sizeof(*req));
1057
1058 req->state[0] = SHA256_H0;
1059 req->state[1] = SHA256_H1;
1060 req->state[2] = SHA256_H2;
1061 req->state[3] = SHA256_H3;
1062 req->state[4] = SHA256_H4;
1063 req->state[5] = SHA256_H5;
1064 req->state[6] = SHA256_H6;
1065 req->state[7] = SHA256_H7;
1066
1067 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
1068 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1069 req->state_sz = SHA256_DIGEST_SIZE;
1070
1071 return 0;
1072 }
1073
1074 static int safexcel_sha256_digest(struct ahash_request *areq)
1075 {
1076 int ret = safexcel_sha256_init(areq);
1077
1078 if (ret)
1079 return ret;
1080
1081 return safexcel_ahash_finup(areq);
1082 }
1083
1084 struct safexcel_alg_template safexcel_alg_sha256 = {
1085 .type = SAFEXCEL_ALG_TYPE_AHASH,
1086 .engines = EIP97IES | EIP197B | EIP197D,
1087 .alg.ahash = {
1088 .init = safexcel_sha256_init,
1089 .update = safexcel_ahash_update,
1090 .final = safexcel_ahash_final,
1091 .finup = safexcel_ahash_finup,
1092 .digest = safexcel_sha256_digest,
1093 .export = safexcel_ahash_export,
1094 .import = safexcel_ahash_import,
1095 .halg = {
1096 .digestsize = SHA256_DIGEST_SIZE,
1097 .statesize = sizeof(struct safexcel_ahash_export_state),
1098 .base = {
1099 .cra_name = "sha256",
1100 .cra_driver_name = "safexcel-sha256",
1101 .cra_priority = 300,
1102 .cra_flags = CRYPTO_ALG_ASYNC |
1103 CRYPTO_ALG_KERN_DRIVER_ONLY,
1104 .cra_blocksize = SHA256_BLOCK_SIZE,
1105 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1106 .cra_init = safexcel_ahash_cra_init,
1107 .cra_exit = safexcel_ahash_cra_exit,
1108 .cra_module = THIS_MODULE,
1109 },
1110 },
1111 },
1112 };
1113
1114 static int safexcel_sha224_init(struct ahash_request *areq)
1115 {
1116 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1117 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1118
1119 memset(req, 0, sizeof(*req));
1120
1121 req->state[0] = SHA224_H0;
1122 req->state[1] = SHA224_H1;
1123 req->state[2] = SHA224_H2;
1124 req->state[3] = SHA224_H3;
1125 req->state[4] = SHA224_H4;
1126 req->state[5] = SHA224_H5;
1127 req->state[6] = SHA224_H6;
1128 req->state[7] = SHA224_H7;
1129
1130 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
1131 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1132 req->state_sz = SHA256_DIGEST_SIZE;
1133
1134 return 0;
1135 }
1136
1137 static int safexcel_sha224_digest(struct ahash_request *areq)
1138 {
1139 int ret = safexcel_sha224_init(areq);
1140
1141 if (ret)
1142 return ret;
1143
1144 return safexcel_ahash_finup(areq);
1145 }
1146
1147 struct safexcel_alg_template safexcel_alg_sha224 = {
1148 .type = SAFEXCEL_ALG_TYPE_AHASH,
1149 .engines = EIP97IES | EIP197B | EIP197D,
1150 .alg.ahash = {
1151 .init = safexcel_sha224_init,
1152 .update = safexcel_ahash_update,
1153 .final = safexcel_ahash_final,
1154 .finup = safexcel_ahash_finup,
1155 .digest = safexcel_sha224_digest,
1156 .export = safexcel_ahash_export,
1157 .import = safexcel_ahash_import,
1158 .halg = {
1159 .digestsize = SHA224_DIGEST_SIZE,
1160 .statesize = sizeof(struct safexcel_ahash_export_state),
1161 .base = {
1162 .cra_name = "sha224",
1163 .cra_driver_name = "safexcel-sha224",
1164 .cra_priority = 300,
1165 .cra_flags = CRYPTO_ALG_ASYNC |
1166 CRYPTO_ALG_KERN_DRIVER_ONLY,
1167 .cra_blocksize = SHA224_BLOCK_SIZE,
1168 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1169 .cra_init = safexcel_ahash_cra_init,
1170 .cra_exit = safexcel_ahash_cra_exit,
1171 .cra_module = THIS_MODULE,
1172 },
1173 },
1174 },
1175 };
1176
1177 static int safexcel_hmac_sha224_setkey(struct crypto_ahash *tfm, const u8 *key,
1178 unsigned int keylen)
1179 {
1180 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha224",
1181 SHA256_DIGEST_SIZE);
1182 }
1183
1184 static int safexcel_hmac_sha224_init(struct ahash_request *areq)
1185 {
1186 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1187
1188 safexcel_sha224_init(areq);
1189 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1190 return 0;
1191 }
1192
1193 static int safexcel_hmac_sha224_digest(struct ahash_request *areq)
1194 {
1195 int ret = safexcel_hmac_sha224_init(areq);
1196
1197 if (ret)
1198 return ret;
1199
1200 return safexcel_ahash_finup(areq);
1201 }
1202
1203 struct safexcel_alg_template safexcel_alg_hmac_sha224 = {
1204 .type = SAFEXCEL_ALG_TYPE_AHASH,
1205 .engines = EIP97IES | EIP197B | EIP197D,
1206 .alg.ahash = {
1207 .init = safexcel_hmac_sha224_init,
1208 .update = safexcel_ahash_update,
1209 .final = safexcel_ahash_final,
1210 .finup = safexcel_ahash_finup,
1211 .digest = safexcel_hmac_sha224_digest,
1212 .setkey = safexcel_hmac_sha224_setkey,
1213 .export = safexcel_ahash_export,
1214 .import = safexcel_ahash_import,
1215 .halg = {
1216 .digestsize = SHA224_DIGEST_SIZE,
1217 .statesize = sizeof(struct safexcel_ahash_export_state),
1218 .base = {
1219 .cra_name = "hmac(sha224)",
1220 .cra_driver_name = "safexcel-hmac-sha224",
1221 .cra_priority = 300,
1222 .cra_flags = CRYPTO_ALG_ASYNC |
1223 CRYPTO_ALG_KERN_DRIVER_ONLY,
1224 .cra_blocksize = SHA224_BLOCK_SIZE,
1225 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1226 .cra_init = safexcel_ahash_cra_init,
1227 .cra_exit = safexcel_ahash_cra_exit,
1228 .cra_module = THIS_MODULE,
1229 },
1230 },
1231 },
1232 };
1233
1234 static int safexcel_hmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
1235 unsigned int keylen)
1236 {
1237 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha256",
1238 SHA256_DIGEST_SIZE);
1239 }
1240
1241 static int safexcel_hmac_sha256_init(struct ahash_request *areq)
1242 {
1243 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1244
1245 safexcel_sha256_init(areq);
1246 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1247 return 0;
1248 }
1249
1250 static int safexcel_hmac_sha256_digest(struct ahash_request *areq)
1251 {
1252 int ret = safexcel_hmac_sha256_init(areq);
1253
1254 if (ret)
1255 return ret;
1256
1257 return safexcel_ahash_finup(areq);
1258 }
1259
1260 struct safexcel_alg_template safexcel_alg_hmac_sha256 = {
1261 .type = SAFEXCEL_ALG_TYPE_AHASH,
1262 .engines = EIP97IES | EIP197B | EIP197D,
1263 .alg.ahash = {
1264 .init = safexcel_hmac_sha256_init,
1265 .update = safexcel_ahash_update,
1266 .final = safexcel_ahash_final,
1267 .finup = safexcel_ahash_finup,
1268 .digest = safexcel_hmac_sha256_digest,
1269 .setkey = safexcel_hmac_sha256_setkey,
1270 .export = safexcel_ahash_export,
1271 .import = safexcel_ahash_import,
1272 .halg = {
1273 .digestsize = SHA256_DIGEST_SIZE,
1274 .statesize = sizeof(struct safexcel_ahash_export_state),
1275 .base = {
1276 .cra_name = "hmac(sha256)",
1277 .cra_driver_name = "safexcel-hmac-sha256",
1278 .cra_priority = 300,
1279 .cra_flags = CRYPTO_ALG_ASYNC |
1280 CRYPTO_ALG_KERN_DRIVER_ONLY,
1281 .cra_blocksize = SHA256_BLOCK_SIZE,
1282 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1283 .cra_init = safexcel_ahash_cra_init,
1284 .cra_exit = safexcel_ahash_cra_exit,
1285 .cra_module = THIS_MODULE,
1286 },
1287 },
1288 },
1289 };
1290
1291 static int safexcel_sha512_init(struct ahash_request *areq)
1292 {
1293 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1294 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1295
1296 memset(req, 0, sizeof(*req));
1297
1298 req->state[0] = lower_32_bits(SHA512_H0);
1299 req->state[1] = upper_32_bits(SHA512_H0);
1300 req->state[2] = lower_32_bits(SHA512_H1);
1301 req->state[3] = upper_32_bits(SHA512_H1);
1302 req->state[4] = lower_32_bits(SHA512_H2);
1303 req->state[5] = upper_32_bits(SHA512_H2);
1304 req->state[6] = lower_32_bits(SHA512_H3);
1305 req->state[7] = upper_32_bits(SHA512_H3);
1306 req->state[8] = lower_32_bits(SHA512_H4);
1307 req->state[9] = upper_32_bits(SHA512_H4);
1308 req->state[10] = lower_32_bits(SHA512_H5);
1309 req->state[11] = upper_32_bits(SHA512_H5);
1310 req->state[12] = lower_32_bits(SHA512_H6);
1311 req->state[13] = upper_32_bits(SHA512_H6);
1312 req->state[14] = lower_32_bits(SHA512_H7);
1313 req->state[15] = upper_32_bits(SHA512_H7);
1314
1315 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
1316 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1317 req->state_sz = SHA512_DIGEST_SIZE;
1318
1319 return 0;
1320 }
1321
1322 static int safexcel_sha512_digest(struct ahash_request *areq)
1323 {
1324 int ret = safexcel_sha512_init(areq);
1325
1326 if (ret)
1327 return ret;
1328
1329 return safexcel_ahash_finup(areq);
1330 }
1331
1332 struct safexcel_alg_template safexcel_alg_sha512 = {
1333 .type = SAFEXCEL_ALG_TYPE_AHASH,
1334 .engines = EIP97IES | EIP197B | EIP197D,
1335 .alg.ahash = {
1336 .init = safexcel_sha512_init,
1337 .update = safexcel_ahash_update,
1338 .final = safexcel_ahash_final,
1339 .finup = safexcel_ahash_finup,
1340 .digest = safexcel_sha512_digest,
1341 .export = safexcel_ahash_export,
1342 .import = safexcel_ahash_import,
1343 .halg = {
1344 .digestsize = SHA512_DIGEST_SIZE,
1345 .statesize = sizeof(struct safexcel_ahash_export_state),
1346 .base = {
1347 .cra_name = "sha512",
1348 .cra_driver_name = "safexcel-sha512",
1349 .cra_priority = 300,
1350 .cra_flags = CRYPTO_ALG_ASYNC |
1351 CRYPTO_ALG_KERN_DRIVER_ONLY,
1352 .cra_blocksize = SHA512_BLOCK_SIZE,
1353 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1354 .cra_init = safexcel_ahash_cra_init,
1355 .cra_exit = safexcel_ahash_cra_exit,
1356 .cra_module = THIS_MODULE,
1357 },
1358 },
1359 },
1360 };
1361
1362 static int safexcel_sha384_init(struct ahash_request *areq)
1363 {
1364 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1365 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1366
1367 memset(req, 0, sizeof(*req));
1368
1369 req->state[0] = lower_32_bits(SHA384_H0);
1370 req->state[1] = upper_32_bits(SHA384_H0);
1371 req->state[2] = lower_32_bits(SHA384_H1);
1372 req->state[3] = upper_32_bits(SHA384_H1);
1373 req->state[4] = lower_32_bits(SHA384_H2);
1374 req->state[5] = upper_32_bits(SHA384_H2);
1375 req->state[6] = lower_32_bits(SHA384_H3);
1376 req->state[7] = upper_32_bits(SHA384_H3);
1377 req->state[8] = lower_32_bits(SHA384_H4);
1378 req->state[9] = upper_32_bits(SHA384_H4);
1379 req->state[10] = lower_32_bits(SHA384_H5);
1380 req->state[11] = upper_32_bits(SHA384_H5);
1381 req->state[12] = lower_32_bits(SHA384_H6);
1382 req->state[13] = upper_32_bits(SHA384_H6);
1383 req->state[14] = lower_32_bits(SHA384_H7);
1384 req->state[15] = upper_32_bits(SHA384_H7);
1385
1386 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
1387 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1388 req->state_sz = SHA512_DIGEST_SIZE;
1389
1390 return 0;
1391 }
1392
1393 static int safexcel_sha384_digest(struct ahash_request *areq)
1394 {
1395 int ret = safexcel_sha384_init(areq);
1396
1397 if (ret)
1398 return ret;
1399
1400 return safexcel_ahash_finup(areq);
1401 }
1402
1403 struct safexcel_alg_template safexcel_alg_sha384 = {
1404 .type = SAFEXCEL_ALG_TYPE_AHASH,
1405 .engines = EIP97IES | EIP197B | EIP197D,
1406 .alg.ahash = {
1407 .init = safexcel_sha384_init,
1408 .update = safexcel_ahash_update,
1409 .final = safexcel_ahash_final,
1410 .finup = safexcel_ahash_finup,
1411 .digest = safexcel_sha384_digest,
1412 .export = safexcel_ahash_export,
1413 .import = safexcel_ahash_import,
1414 .halg = {
1415 .digestsize = SHA384_DIGEST_SIZE,
1416 .statesize = sizeof(struct safexcel_ahash_export_state),
1417 .base = {
1418 .cra_name = "sha384",
1419 .cra_driver_name = "safexcel-sha384",
1420 .cra_priority = 300,
1421 .cra_flags = CRYPTO_ALG_ASYNC |
1422 CRYPTO_ALG_KERN_DRIVER_ONLY,
1423 .cra_blocksize = SHA384_BLOCK_SIZE,
1424 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1425 .cra_init = safexcel_ahash_cra_init,
1426 .cra_exit = safexcel_ahash_cra_exit,
1427 .cra_module = THIS_MODULE,
1428 },
1429 },
1430 },
1431 };
1432
1433 static int safexcel_hmac_sha512_setkey(struct crypto_ahash *tfm, const u8 *key,
1434 unsigned int keylen)
1435 {
1436 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha512",
1437 SHA512_DIGEST_SIZE);
1438 }
1439
1440 static int safexcel_hmac_sha512_init(struct ahash_request *areq)
1441 {
1442 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1443
1444 safexcel_sha512_init(areq);
1445 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1446 return 0;
1447 }
1448
1449 static int safexcel_hmac_sha512_digest(struct ahash_request *areq)
1450 {
1451 int ret = safexcel_hmac_sha512_init(areq);
1452
1453 if (ret)
1454 return ret;
1455
1456 return safexcel_ahash_finup(areq);
1457 }
1458
1459 struct safexcel_alg_template safexcel_alg_hmac_sha512 = {
1460 .type = SAFEXCEL_ALG_TYPE_AHASH,
1461 .engines = EIP97IES | EIP197B | EIP197D,
1462 .alg.ahash = {
1463 .init = safexcel_hmac_sha512_init,
1464 .update = safexcel_ahash_update,
1465 .final = safexcel_ahash_final,
1466 .finup = safexcel_ahash_finup,
1467 .digest = safexcel_hmac_sha512_digest,
1468 .setkey = safexcel_hmac_sha512_setkey,
1469 .export = safexcel_ahash_export,
1470 .import = safexcel_ahash_import,
1471 .halg = {
1472 .digestsize = SHA512_DIGEST_SIZE,
1473 .statesize = sizeof(struct safexcel_ahash_export_state),
1474 .base = {
1475 .cra_name = "hmac(sha512)",
1476 .cra_driver_name = "safexcel-hmac-sha512",
1477 .cra_priority = 300,
1478 .cra_flags = CRYPTO_ALG_ASYNC |
1479 CRYPTO_ALG_KERN_DRIVER_ONLY,
1480 .cra_blocksize = SHA512_BLOCK_SIZE,
1481 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1482 .cra_init = safexcel_ahash_cra_init,
1483 .cra_exit = safexcel_ahash_cra_exit,
1484 .cra_module = THIS_MODULE,
1485 },
1486 },
1487 },
1488 };
1489
1490 static int safexcel_hmac_sha384_setkey(struct crypto_ahash *tfm, const u8 *key,
1491 unsigned int keylen)
1492 {
1493 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sha384",
1494 SHA512_DIGEST_SIZE);
1495 }
1496
1497 static int safexcel_hmac_sha384_init(struct ahash_request *areq)
1498 {
1499 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1500
1501 safexcel_sha384_init(areq);
1502 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1503 return 0;
1504 }
1505
1506 static int safexcel_hmac_sha384_digest(struct ahash_request *areq)
1507 {
1508 int ret = safexcel_hmac_sha384_init(areq);
1509
1510 if (ret)
1511 return ret;
1512
1513 return safexcel_ahash_finup(areq);
1514 }
1515
1516 struct safexcel_alg_template safexcel_alg_hmac_sha384 = {
1517 .type = SAFEXCEL_ALG_TYPE_AHASH,
1518 .engines = EIP97IES | EIP197B | EIP197D,
1519 .alg.ahash = {
1520 .init = safexcel_hmac_sha384_init,
1521 .update = safexcel_ahash_update,
1522 .final = safexcel_ahash_final,
1523 .finup = safexcel_ahash_finup,
1524 .digest = safexcel_hmac_sha384_digest,
1525 .setkey = safexcel_hmac_sha384_setkey,
1526 .export = safexcel_ahash_export,
1527 .import = safexcel_ahash_import,
1528 .halg = {
1529 .digestsize = SHA384_DIGEST_SIZE,
1530 .statesize = sizeof(struct safexcel_ahash_export_state),
1531 .base = {
1532 .cra_name = "hmac(sha384)",
1533 .cra_driver_name = "safexcel-hmac-sha384",
1534 .cra_priority = 300,
1535 .cra_flags = CRYPTO_ALG_ASYNC |
1536 CRYPTO_ALG_KERN_DRIVER_ONLY,
1537 .cra_blocksize = SHA384_BLOCK_SIZE,
1538 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1539 .cra_init = safexcel_ahash_cra_init,
1540 .cra_exit = safexcel_ahash_cra_exit,
1541 .cra_module = THIS_MODULE,
1542 },
1543 },
1544 },
1545 };
1546
1547 static int safexcel_md5_init(struct ahash_request *areq)
1548 {
1549 struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
1550 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1551
1552 memset(req, 0, sizeof(*req));
1553
1554 req->state[0] = MD5_H0;
1555 req->state[1] = MD5_H1;
1556 req->state[2] = MD5_H2;
1557 req->state[3] = MD5_H3;
1558
1559 ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
1560 req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
1561 req->state_sz = MD5_DIGEST_SIZE;
1562
1563 return 0;
1564 }
1565
1566 static int safexcel_md5_digest(struct ahash_request *areq)
1567 {
1568 int ret = safexcel_md5_init(areq);
1569
1570 if (ret)
1571 return ret;
1572
1573 return safexcel_ahash_finup(areq);
1574 }
1575
1576 struct safexcel_alg_template safexcel_alg_md5 = {
1577 .type = SAFEXCEL_ALG_TYPE_AHASH,
1578 .engines = EIP97IES | EIP197B | EIP197D,
1579 .alg.ahash = {
1580 .init = safexcel_md5_init,
1581 .update = safexcel_ahash_update,
1582 .final = safexcel_ahash_final,
1583 .finup = safexcel_ahash_finup,
1584 .digest = safexcel_md5_digest,
1585 .export = safexcel_ahash_export,
1586 .import = safexcel_ahash_import,
1587 .halg = {
1588 .digestsize = MD5_DIGEST_SIZE,
1589 .statesize = sizeof(struct safexcel_ahash_export_state),
1590 .base = {
1591 .cra_name = "md5",
1592 .cra_driver_name = "safexcel-md5",
1593 .cra_priority = 300,
1594 .cra_flags = CRYPTO_ALG_ASYNC |
1595 CRYPTO_ALG_KERN_DRIVER_ONLY,
1596 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1597 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1598 .cra_init = safexcel_ahash_cra_init,
1599 .cra_exit = safexcel_ahash_cra_exit,
1600 .cra_module = THIS_MODULE,
1601 },
1602 },
1603 },
1604 };
1605
1606 static int safexcel_hmac_md5_init(struct ahash_request *areq)
1607 {
1608 struct safexcel_ahash_req *req = ahash_request_ctx(areq);
1609
1610 safexcel_md5_init(areq);
1611 req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
1612 return 0;
1613 }
1614
1615 static int safexcel_hmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
1616 unsigned int keylen)
1617 {
1618 return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-md5",
1619 MD5_DIGEST_SIZE);
1620 }
1621
1622 static int safexcel_hmac_md5_digest(struct ahash_request *areq)
1623 {
1624 int ret = safexcel_hmac_md5_init(areq);
1625
1626 if (ret)
1627 return ret;
1628
1629 return safexcel_ahash_finup(areq);
1630 }
1631
1632 struct safexcel_alg_template safexcel_alg_hmac_md5 = {
1633 .type = SAFEXCEL_ALG_TYPE_AHASH,
1634 .engines = EIP97IES | EIP197B | EIP197D,
1635 .alg.ahash = {
1636 .init = safexcel_hmac_md5_init,
1637 .update = safexcel_ahash_update,
1638 .final = safexcel_ahash_final,
1639 .finup = safexcel_ahash_finup,
1640 .digest = safexcel_hmac_md5_digest,
1641 .setkey = safexcel_hmac_md5_setkey,
1642 .export = safexcel_ahash_export,
1643 .import = safexcel_ahash_import,
1644 .halg = {
1645 .digestsize = MD5_DIGEST_SIZE,
1646 .statesize = sizeof(struct safexcel_ahash_export_state),
1647 .base = {
1648 .cra_name = "hmac(md5)",
1649 .cra_driver_name = "safexcel-hmac-md5",
1650 .cra_priority = 300,
1651 .cra_flags = CRYPTO_ALG_ASYNC |
1652 CRYPTO_ALG_KERN_DRIVER_ONLY,
1653 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1654 .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
1655 .cra_init = safexcel_ahash_cra_init,
1656 .cra_exit = safexcel_ahash_cra_exit,
1657 .cra_module = THIS_MODULE,
1658 },
1659 },
1660 },
1661 };
1662