// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

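/*
 * AES-XTS requests whose length is at or below this threshold are handed
 * to the software fallback instead of the engine (see the fallback
 * conditions in qce_skcipher_crypt()). 0 sends everything to hardware;
 * values below 16 would break AES-GCM. Writable at runtime through
 * /sys/module/<module>/parameters/aes_sw_max_len (the module is
 * presumably qcrypto.ko for this driver).
 */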
static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
                 "Only use hardware for AES requests larger than this "
                 "[0=always use hardware; anything <16 breaks AES-GCM; default="
                 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);

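/*
 * DMA done callback. Tears down the DMA mappings and the destination
 * scatterlist table built in qce_skcipher_async_req_handle(), checks the
 * engine status, copies the output IV out of the result dump buffer and
 * completes the request.
 */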
static void qce_skcipher_done(void *data)
{
        struct crypto_async_request *async_req = data;
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
        struct qce_device *qce = tmpl->qce;
        struct qce_result_dump *result_buf = qce->dma.result_buf;
        enum dma_data_direction dir_src, dir_dst;
        u32 status;
        int error;
        bool diff_dst;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        error = qce_dma_terminate_all(&qce->dma);
        if (error)
                dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
                        error);

        if (diff_dst)
                dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

        sg_free_table(&rctx->dst_tbl);

        error = qce_check_status(qce, &status);
        if (error < 0)
                dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

        memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
        qce->async_req_done(tmpl->qce, error);
}

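/*
 * Builds everything the engine needs for one request: counts and maps the
 * source/destination scatterlists, appends the result dump buffer as an
 * extra destination entry (so the engine can write back status and the
 * output IV), prepares the DMA descriptors and starts the engine.
 */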
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
        struct skcipher_request *req = skcipher_request_cast(async_req);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
        struct qce_device *qce = tmpl->qce;
        enum dma_data_direction dir_src, dir_dst;
        struct scatterlist *sg;
        bool diff_dst;
        gfp_t gfp;
        int dst_nents, src_nents, ret;

        rctx->iv = req->iv;
        rctx->ivsize = crypto_skcipher_ivsize(skcipher);
        rctx->cryptlen = req->cryptlen;

        diff_dst = (req->src != req->dst);
        dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
        dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

        rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
        if (diff_dst)
                rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
        else
                rctx->dst_nents = rctx->src_nents;
        if (rctx->src_nents < 0) {
                dev_err(qce->dev, "Invalid number of src SG.\n");
                return rctx->src_nents;
        }
        if (rctx->dst_nents < 0) {
                dev_err(qce->dev, "Invalid number of dst SG.\n");
                return rctx->dst_nents;
        }

        /* one extra entry for the result buffer appended below */
        rctx->dst_nents += 1;

        gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                                                GFP_KERNEL : GFP_ATOMIC;

        ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
        if (ret)
                return ret;

        sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

        sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
                             QCE_RESULT_BUF_SZ);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto error_free;
        }

        sg_mark_end(sg);
        rctx->dst_sg = rctx->dst_tbl.sgl;

        /* dma_map_sg() returns 0 on failure, never a negative errno */
        dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
        if (!dst_nents) {
                ret = -EIO;
                goto error_free;
        }

        if (diff_dst) {
                src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
                if (!src_nents) {
                        ret = -EIO;
                        goto error_unmap_dst;
                }
                rctx->src_sg = req->src;
        } else {
                rctx->src_sg = rctx->dst_sg;
                /* same table as dst, minus the result buffer entry */
                src_nents = dst_nents - 1;
        }

        ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
                               rctx->dst_sg, dst_nents,
                               qce_skcipher_done, async_req);
        if (ret)
                goto error_unmap_src;

        qce_dma_issue_pending(&qce->dma);

        ret = qce_start(async_req, tmpl->crypto_alg_type);
        if (ret)
                goto error_terminate;

        return 0;

error_terminate:
        qce_dma_terminate_all(&qce->dma);
error_unmap_src:
        if (diff_dst)
                dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
        dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
        sg_free_table(&rctx->dst_tbl);
        return ret;
}

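/*
 * AES setkey. Key sizes the engine supports (AES-128/256, and the XTS
 * variants thereof) are stashed in the context; AES-192 is only keyed
 * into the fallback tfm. The fallback always gets a copy of the key so
 * qce_skcipher_crypt() can divert unsupported requests to it.
 */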
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
                               unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
        struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
        unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
        unsigned int __keylen;
        int ret;

        if (!key || !keylen)
                return -EINVAL;

        /*
         * AES-XTS with key1 == key2 is not supported by the crypto engine.
         * Revisit to request a fallback cipher in this case.
         */
        if (IS_XTS(flags)) {
                __keylen = keylen >> 1;
                if (!memcmp(key, key + __keylen, __keylen))
                        return -ENOKEY;
        } else {
                __keylen = keylen;
        }

        switch (__keylen) {
        case AES_KEYSIZE_128:
        case AES_KEYSIZE_256:
                memcpy(ctx->enc_key, key, keylen);
                break;
        case AES_KEYSIZE_192:
                /* not supported by the engine; handled by the fallback */
                break;
        default:
                return -EINVAL;
        }

        ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
        if (!ret)
                ctx->enc_keylen = keylen;
        return ret;
}

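/*
 * DES setkey: run the standard weak-key check, then stash the key for the
 * engine. No software fallback is used for DES or triple-DES.
 */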
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
                          unsigned int keylen)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
        int err;

        err = verify_skcipher_des_key(ablk, key);
        if (err)
                return err;

        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
                           unsigned int keylen)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
        u32 _key[6];
        int err;

        err = verify_skcipher_des3_key(ablk, key);
        if (err)
                return err;

        /*
         * The crypto engine does not support any two keys of a triple-DES
         * key being the same. verify_skcipher_des3_key() does not check
         * for all of the conditions below, so return -ENOKEY if any two
         * keys are identical. Revisit to see if a fallback cipher is
         * needed to handle this condition.
         */
        memcpy(_key, key, DES3_EDE_KEY_SIZE);
        if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) ||
            !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) ||
            !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5])))
                return -ENOKEY;

        ctx->enc_keylen = keylen;
        memcpy(ctx->enc_key, key, keylen);
        return 0;
}

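/*
 * Common encrypt/decrypt entry point. Rejects lengths the engine cannot
 * handle, diverts AES requests the engine does not support (or is not
 * efficient at) to the software fallback, and queues everything else on
 * the engine.
 */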
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
        struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
        unsigned int blocksize = crypto_skcipher_blocksize(tfm);
        int keylen;
        int ret;

        rctx->flags = tmpl->alg_flags;
        rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
        keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

        /* CE does not handle 0-length messages */
        if (!req->cryptlen)
                return 0;

        /*
         * ECB and CBC algorithms require message lengths to be
         * multiples of the block size.
         */
        if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags))
                if (!IS_ALIGNED(req->cryptlen, blocksize))
                        return -EINVAL;

        /*
         * Conditions for requesting a fallback cipher:
         * - AES-192 (not supported by the crypto engine (CE))
         * - AES-XTS request with len <= aes_sw_max_len bytes (not
         *   recommended to use CE)
         * - AES-XTS request with len > QCE_SECTOR_SIZE that is not a
         *   multiple of it (revisit this condition to check if it is
         *   needed in all versions of CE)
         */
        if (IS_AES(rctx->flags) &&
            ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
             (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) ||
                                      (req->cryptlen > QCE_SECTOR_SIZE &&
                                       req->cryptlen % QCE_SECTOR_SIZE))))) {
                skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
                skcipher_request_set_callback(&rctx->fallback_req,
                                              req->base.flags,
                                              req->base.complete,
                                              req->base.data);
                skcipher_request_set_crypt(&rctx->fallback_req, req->src,
                                           req->dst, req->cryptlen, req->iv);
                ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
                                crypto_skcipher_decrypt(&rctx->fallback_req);
                return ret;
        }

        return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
        return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
        return qce_skcipher_crypt(req, 0);
}

static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
        /* take the size without the fallback skcipher_request at the end */
        crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
                                                  fallback_req));
        return 0;
}

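/*
 * AES tfms additionally allocate a software fallback and reserve room for
 * its request at the end of the qce request context.
 */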
static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
                                              0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback))
                return PTR_ERR(ctx->fallback);

        crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
                                         crypto_skcipher_reqsize(ctx->fallback));
        return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
        struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->fallback);
}

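/* Parameters describing one algorithm variant, used to fill a skcipher_alg */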
struct qce_skcipher_def {
        unsigned long flags;
        const char *name;
        const char *drv_name;
        unsigned int blocksize;
        unsigned int chunksize;
        unsigned int ivsize;
        unsigned int min_keysize;
        unsigned int max_keysize;
};

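/* All variants exposed by this driver; XTS key sizes cover both AES keys */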
static const struct qce_skcipher_def skcipher_def[] = {
        {
                .flags = QCE_ALG_AES | QCE_MODE_ECB,
                .name = "ecb(aes)",
                .drv_name = "ecb-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_CBC,
                .name = "cbc(aes)",
                .drv_name = "cbc-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_CTR,
                .name = "ctr(aes)",
                .drv_name = "ctr-aes-qce",
                .blocksize = 1,
                .chunksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE,
                .max_keysize = AES_MAX_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_AES | QCE_MODE_XTS,
                .name = "xts(aes)",
                .drv_name = "xts-aes-qce",
                .blocksize = AES_BLOCK_SIZE,
                .ivsize = AES_BLOCK_SIZE,
                .min_keysize = AES_MIN_KEY_SIZE * 2,
                .max_keysize = AES_MAX_KEY_SIZE * 2,
        },
        {
                .flags = QCE_ALG_DES | QCE_MODE_ECB,
                .name = "ecb(des)",
                .drv_name = "ecb-des-qce",
                .blocksize = DES_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_DES | QCE_MODE_CBC,
                .name = "cbc(des)",
                .drv_name = "cbc-des-qce",
                .blocksize = DES_BLOCK_SIZE,
                .ivsize = DES_BLOCK_SIZE,
                .min_keysize = DES_KEY_SIZE,
                .max_keysize = DES_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_3DES | QCE_MODE_ECB,
                .name = "ecb(des3_ede)",
                .drv_name = "ecb-3des-qce",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .ivsize = 0,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
        },
        {
                .flags = QCE_ALG_3DES | QCE_MODE_CBC,
                .name = "cbc(des3_ede)",
                .drv_name = "cbc-3des-qce",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .ivsize = DES3_EDE_BLOCK_SIZE,
                .min_keysize = DES3_EDE_KEY_SIZE,
                .max_keysize = DES3_EDE_KEY_SIZE,
        },
};

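/*
 * Allocates a template for one variant, fills in the crypto API callbacks
 * and registers it. AES variants get the fallback-aware init/exit hooks
 * and CRYPTO_ALG_NEED_FALLBACK.
 */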
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
                                     struct qce_device *qce)
{
        struct qce_alg_template *tmpl;
        struct skcipher_alg *alg;
        int ret;

        tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
        if (!tmpl)
                return -ENOMEM;

        alg = &tmpl->alg.skcipher;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 def->drv_name);

        alg->base.cra_blocksize = def->blocksize;
        alg->chunksize = def->chunksize;
        alg->ivsize = def->ivsize;
        alg->min_keysize = def->min_keysize;
        alg->max_keysize = def->max_keysize;
        alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey :
                      IS_DES(def->flags) ? qce_des_setkey :
                      qce_skcipher_setkey;
        alg->encrypt = qce_skcipher_encrypt;
        alg->decrypt = qce_skcipher_decrypt;

        alg->base.cra_priority = 300;
        alg->base.cra_flags = CRYPTO_ALG_ASYNC |
                              CRYPTO_ALG_ALLOCATES_MEMORY |
                              CRYPTO_ALG_KERN_DRIVER_ONLY;
        alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx);
        alg->base.cra_alignmask = 0;
        alg->base.cra_module = THIS_MODULE;

        if (IS_AES(def->flags)) {
                alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
                alg->init = qce_skcipher_init_fallback;
                alg->exit = qce_skcipher_exit;
        } else {
                alg->init = qce_skcipher_init;
        }

        INIT_LIST_HEAD(&tmpl->entry);
        tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
        tmpl->alg_flags = def->flags;
        tmpl->qce = qce;

        ret = crypto_register_skcipher(alg);
        if (ret) {
                dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
                kfree(tmpl);
                return ret;
        }

        list_add_tail(&tmpl->entry, &skcipher_algs);
        dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
        return 0;
}

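/* Unregisters and frees every template previously added to skcipher_algs */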
static void qce_skcipher_unregister(struct qce_device *qce)
{
        struct qce_alg_template *tmpl, *n;

        list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
                crypto_unregister_skcipher(&tmpl->alg.skcipher);
                list_del(&tmpl->entry);
                kfree(tmpl);
        }
}

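/* Registers all variants, rolling back already-registered ones on failure */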
static int qce_skcipher_register(struct qce_device *qce)
{
        int ret, i;

        for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
                ret = qce_skcipher_register_one(&skcipher_def[i], qce);
                if (ret)
                        goto err;
        }

        return 0;
err:
        qce_skcipher_unregister(qce);
        return ret;
}

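/* Ops bundle picked up by the qce core to hook this algorithm type up */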
const struct qce_algo_ops skcipher_ops = {
        .type = CRYPTO_ALG_TYPE_SKCIPHER,
        .register_algs = qce_skcipher_register,
        .unregister_algs = qce_skcipher_unregister,
        .async_req_handle = qce_skcipher_async_req_handle,
};