// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

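/*
 * Walk the source and destination scatterlists in lockstep, one
 * engine-sized chunk of the request at a time. Used when building the
 * TDMA descriptor chain for a DMA-backed request.
 */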
struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

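/* Undo the DMA mappings set up by mv_cesa_skcipher_dma_req_init(). */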
static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

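/*
 * Standard (non-TDMA) mode: the CPU copies the operation descriptor and
 * the next chunk of input data into the engine SRAM, then kicks the
 * accelerator. Each step handles at most CESA_SA_SRAM_PAYLOAD_SIZE
 * bytes; mv_cesa_skcipher_std_process() pulls the result back and
 * decides whether another step is needed.
 */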
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

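/*
 * Copy the processed chunk out of SRAM and advance the offset. Returns
 * -EINPROGRESS while data remains to be processed, 0 once the whole
 * request has been handled.
 */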
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

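/*
 * Bind the request to the engine it was scheduled on and let the DMA or
 * standard backend do its per-engine setup.
 */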
static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

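/*
 * On completion, drop the request's contribution to the engine load and
 * copy the output IV back into the request: from the last TDMA op
 * context in DMA mode, or straight out of SRAM in standard mode.
 */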
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.skcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

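/*
 * Expand the AES key with the generic helper, then rearrange the decode
 * key into the layout the engine apparently expects (see below). Both
 * schedules are kept in the tfm context so that mv_cesa_aes_op() can
 * pick the right one depending on the direction.
 */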
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = aes_expandkey(&ctx->aes, key, len);
	if (ret)
		return ret;

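	/*
	 * Copy a slice of the expanded encryption schedule over the
	 * generic decode key, matching the layout the CESA engine
	 * presumably expects for 192/256-bit keys (a no-op for 128-bit
	 * keys, where remaining == 0).
	 */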
	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] = ctx->aes.key_enc[offset + i];

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct mv_cesa_des_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct mv_cesa_des3_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

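/*
 * Build the TDMA descriptor chain for a DMA-backed request: map the
 * scatterlists, then, for each engine-sized chunk, queue the op
 * descriptor, the input transfers, a dummy descriptor that launches the
 * crypto operation, and the output transfers. A final result descriptor
 * copies the output IV out of SRAM.
 */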
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;

	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);

	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

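/*
 * Common request validation: reject lengths that are not a multiple of
 * the block size, count the scatterlist entries, then hand off to the
 * DMA or standard backend depending on the engine capabilities.
 */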
static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG\n");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG\n");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

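/*
 * Initialize the request, bind it to the engine returned by
 * mv_cesa_select_engine() and queue it. mv_cesa_req_needs_cleanup()
 * tells us whether the resources grabbed at init time must be released
 * here or from the completion path.
 */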
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

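/*
 * The mv_cesa_{des,des3,aes}_op() helpers below finish the op template
 * started by their callers (cipher selection, key material, IV for CBC
 * modes) and push the request down the common queueing path.
 */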
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.skcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.skcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

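/*
 * AES needs a little more care than DES: pick the encode or decode
 * schedule depending on the direction, copy it as little-endian words,
 * and encode the key length (128/192/256) in the op configuration.
 */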
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.skcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.skcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};