// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Maximum cryptd queue depth per CPU");

static struct workqueue_struct *cryptd_wq;

struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        /*
         * Protected by disabling BH to allow enqueueing from softirq context
         * and dequeuing from the kworker (cryptd_queue_worker()).
         */
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
        refcount_t refcnt;
        struct crypto_sync_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        refcount_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        refcount_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

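/*
 * Set up one crypto_queue per possible CPU and attach the shared worker
 * callback to each.  Per-CPU queues let softirq-context users enqueue
 * without cross-CPU locking.
 */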
static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

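/*
 * Push a request onto the local CPU's queue and kick its work item on
 * the cryptd workqueue.  For tfms with an active refcount (those
 * obtained via the cryptd_alloc_* helpers) a reference is taken so the
 * tfm outlives the queued request; -ENOSPC means the queue is full and
 * no work is scheduled.
 */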
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int err;
        struct cryptd_cpu_queue *cpu_queue;
        refcount_t *refcnt;

        local_bh_disable();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out;

        queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

        if (!refcount_read(refcnt))
                goto out;

        refcount_inc(refcnt);

out:
        local_bh_enable();

        return err;
}

/*
 * Called in workqueue context; performs one real crypto operation (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the crypto
         * workqueue.
         */
        local_bh_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}

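/*
 * Fill in the common algorithm fields for a cryptd instance: the driver
 * name becomes "cryptd(<child driver>)" and the priority is raised by
 * 50 so the async wrapper is preferred over the underlying synchronous
 * implementation on lookup.
 */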
static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

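/*
 * skcipher wrapping: setkey is forwarded to the child transform
 * directly, while encrypt/decrypt are deferred to the workqueue and
 * executed against the synchronous child there.
 */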
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_sync_skcipher *child = ctx->child;
        int err;

        crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(child,
                                       crypto_skcipher_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_sync_skcipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent,
                                  crypto_sync_skcipher_get_flags(child) &
                                  CRYPTO_TFM_RES_MASK);
        return err;
}

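/*
 * Invoke the caller's completion with BH disabled and, if this tfm is
 * refcounted, drop the reference taken at enqueue time, freeing the
 * tfm when the last reference goes away.
 */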
static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

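/*
 * Worker-side encryption/decryption: build an on-stack sub-request for
 * the synchronous child cipher, run it, and complete the original
 * request with the result.
 */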
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_sync_skcipher *child = ctx->child;
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_sync_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

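/*
 * Save the caller's completion in the request context, substitute the
 * worker-side callback, and queue the request.
 */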
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = (struct crypto_sync_skcipher *)cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_sync_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}

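/*
 * Instantiate "cryptd(<skcipher>)": grab the underlying skcipher, mark
 * the instance async (propagating CRYPTO_ALG_INTERNAL from the child),
 * and wire up the enqueueing entry points.
 */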
static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        const char *name;
        u32 type;
        u32 mask;
        int err;

        type = 0;
        mask = CRYPTO_ALG_ASYNC;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_skcipher;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
out_drop_skcipher:
                crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

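/*
 * ahash wrapping: the child is a synchronous shash; each request
 * carries its own shash_desc so init/update/final/finup/digest can be
 * deferred to the workqueue, while export/import run synchronously on
 * that descriptor.
 */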
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                               CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                               CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

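/*
 * export/import are not queued: they operate directly on the request's
 * shash_desc, which holds the partial hash state.
 */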
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;

        return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
                (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
                                   CRYPTO_ALG_OPTIONAL_KEY));

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final = cryptd_hash_final_enqueue;
        inst->alg.finup = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(salg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

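/*
 * Common worker-side AEAD path: redirect the request to the child tfm,
 * run the requested encrypt/decrypt operation, then call the saved
 * completion and drop the enqueue-time reference.
 */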
static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = CRYPTO_ALG_ASYNC;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_aead;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        err = aead_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

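/*
 * Template entry point: dispatch on the algorithm type requested by the
 * "cryptd(...)" instantiation.  In this kernel version skcipher
 * requests still arrive as CRYPTO_ALG_TYPE_BLKCIPHER.
 */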
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_aead(&aead_ctx->aead_spawn);
                kfree(aead_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

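/*
 * Public API for users that want an explicit handle on a cryptd
 * transform.  A minimal usage sketch (the child driver name
 * "__xts-aes-aesni" is a hypothetical example of an internal
 * implementation, not something this file guarantees to exist):
 *
 *      struct cryptd_skcipher *tfm;
 *
 *      tfm = cryptd_alloc_skcipher("__xts-aes-aesni",
 *                                  CRYPTO_ALG_INTERNAL,
 *                                  CRYPTO_ALG_INTERNAL);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      ...
 *      cryptd_free_skcipher(tfm);
 */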
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

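/*
 * Module init: bring up the cryptd workqueue (WQ_MEM_RECLAIM |
 * WQ_CPU_INTENSIVE, max_active of 1), then the per-CPU queues, and
 * finally the template, unwinding in reverse on failure.
 */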
static int __init cryptd_init(void)
{
        int err;

        cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                                    1);
        if (!cryptd_wq)
                return -ENOMEM;

        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
                goto err_destroy_wq;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                goto err_fini_queue;

        return 0;

err_fini_queue:
        cryptd_fini_queue(&queue);
err_destroy_wq:
        destroy_workqueue(cryptd_wq);
        return err;
}

static void __exit cryptd_exit(void)
{
        destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");