/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

/* Per-CPU request queue length limit */
#define MCRYPTD_MAX_CPU_QLEN 100
/* Requests handled per worker run before rescheduling (unless the CPU is idle) */
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
			&cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

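/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file): a multi-buffer algorithm arms the flusher after queueing a job
 * that may sit in a partially filled lane, so the lane still completes
 * when no further requests arrive.  alg_state->alg_cstate is the
 * per-CPU state from struct mcryptd_alg_state in mcryptd.h; the 10ms
 * delay is an arbitrary example value:
 *
 *	struct mcryptd_alg_cstate *cstate;
 *
 *	cstate = this_cpu_ptr(alg_state->alg_cstate);
 *	mcryptd_arm_flusher(cstate, msecs_to_jiffies(10));
 */
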
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
		spin_lock_init(&cpu_queue->q_lock);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				  struct crypto_async_request *request,
				  struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
	spin_lock(&cpu_queue->q_lock);
	cpu = smp_processor_id();
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	spin_unlock(&cpu_queue->q_lock);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context; does one batch of real crypto work (via
 * req->complete) and reschedules itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {

		spin_lock_bh(&cpu_queue->q_lock);
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		spin_unlock_bh(&cpu_queue->q_lock);

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
}

void mcryptd_flusher(struct work_struct *__work)
{
	struct	mcryptd_alg_cstate	*alg_cpu_state;
	struct	mcryptd_alg_state	*alg_state;
	struct	mcryptd_flush_list	*flist;
	int	cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
				cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		    "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
					sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

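/*
 * Example (a minimal sketch; "__intel_sha1-mb" stands in for whatever
 * internal multi-buffer algorithm the caller has registered and is
 * only illustrative here).  A wrapper driver typically allocates the
 * mcryptd handle once at tfm init time and releases it later with
 * mcryptd_free_ahash():
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__intel_sha1-mb", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 */
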
int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);
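
/*
 * Example (a sketch of the usual wrapper pattern, not code from this
 * file): an outer async hash built on mcryptd embeds an inner
 * ahash_request in its own request context and forwards each operation
 * to the mcryptd instance.  mcryptd_tfm below is an assumed name for
 * the handle obtained from mcryptd_alloc_ahash():
 *
 *	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
 *
 *	memcpy(mcryptd_req, req, sizeof(*req));
 *	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
 *	return crypto_ahash_init(mcryptd_req);
 */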

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);


static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");