/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should equal the serial cpumask
	 * of the corresponding padata instance, so it is updated when
	 * padata notifies us about a serial cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This prevents us from using
	 * cpumask_var_t directly, because the actual type of
	 * cpumask_var_t depends on the kernel configuration (in
	 * particular on CONFIG_CPUMASK_OFFSTACK): it may be either a
	 * pointer to a struct cpumask or a plain struct cpumask
	 * allocated in place. We therefore cannot safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer
	 * or rcu_dereference, so it is wrapped in struct pcrypt_cpumask,
	 * which makes it usable with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

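/*
 * Select the callback CPU for a request and hand the request to padata
 * for parallel processing.  If the CPU asked for by the caller is not
 * in the current serial callback cpumask, remap it onto one that is
 * (unless the mask is empty), so completions are spread across the
 * configured callback CPUs.
 */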
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

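/*
 * Completion path: pcrypt_aead_done() runs when the child transform
 * finishes and queues the request for serialization; once padata has
 * restored submission order, pcrypt_aead_serial() completes the
 * original request with the stored result.
 */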
static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;

	padata_do_serial(padata);
}

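/*
 * Parallel worker for encryption: runs on the CPU padata picked, calls
 * into the child transform and, unless the child completes
 * asynchronously (-EINPROGRESS), immediately queues the request for
 * serialization.
 */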
static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

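/*
 * Encryption entry point: build the child request inside our request
 * context, dispatch it to the pencrypt padata instance and report
 * -EINPROGRESS to the caller; the result is delivered through
 * pcrypt_aead_done()/pcrypt_aead_serial().
 */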
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

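/*
 * The decryption path mirrors the encryption path above, but dispatches
 * to the pdecrypt padata instance.
 */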
static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

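/*
 * Spread transforms over the online CPUs in round-robin fashion so the
 * callbacks of different tfms do not all land on the same CPU, then
 * allocate the underlying AEAD and size our request context to hold the
 * padata bookkeeping plus the child's request.
 */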
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void pcrypt_free(struct aead_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}

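/*
 * Fill in the generic crypto_alg fields of a new instance.  The wrapped
 * algorithm keeps its cra_name so lookups still match, the driver name
 * gains a "pcrypt()" prefix, and the priority is raised by 100 so the
 * parallel wrapper is preferred over the plain implementation.
 */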
static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

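/*
 * Instantiate "pcrypt(<alg>)": grab the underlying AEAD by name, copy
 * its geometry (IV size, maximum authentication tag size) and register
 * an asynchronous wrapper instance that routes requests through padata.
 */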
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	inst->free = pcrypt_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}

static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}

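/*
 * padata cpumask notifier: when the serial cpumask of the instance
 * changes, publish a fresh copy for pcrypt_do_parallel() via RCU and
 * free the old mask after a grace period.
 */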
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

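/*
 * Set up one padata instance: allocate its workqueue, the initial
 * callback cpumask, the cpumask-change notifier and the sysfs entry.
 * The CPU hotplug lock is held while the online mask is sampled.
 */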
static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}

static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};

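/*
 * Module init: create the sysfs kset, bring up the pencrypt and
 * pdecrypt padata instances, start them and register the "pcrypt"
 * template.
 */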
static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	crypto_unregister_template(&pcrypt_tmpl);

	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");