/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

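/*
 * Private state tacked onto an ahash_request while the API temporarily
 * rewrites the request (see ahash_save_req() below).  It preserves the
 * caller's completion callback, callback data, result pointer and flags,
 * and provides an aligned scratch buffer for the digest.
 */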
struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

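/*
 * Map the page holding the current scatterlist entry and return how many
 * bytes may be processed from it.  If the offset is misaligned, only walk
 * up to the next alignment boundary so the caller can work on an aligned
 * chunk (see crypto_hash_walk_done()).
 */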
static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

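/*
 * Begin walking a new scatterlist entry: locate the page and in-page
 * offset, clamp the entry length to the remaining total, and map the
 * first chunk.
 */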
static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

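/*
 * Advance the walk after the caller has consumed the previous chunk.
 * Returns the size of the next chunk, zero once the walk is complete, or
 * a negative errno.  An error passed in by the caller unmaps the current
 * page and is propagated straight back.
 */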
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int nbytes = walk->entrylen;

        walk->data -= walk->offset;

        if (nbytes && walk->offset & alignmask && !err) {
                walk->offset = ALIGN(walk->offset, alignmask + 1);
                walk->data += walk->offset;

                nbytes = min(nbytes,
                             ((unsigned int)(PAGE_SIZE)) - walk->offset);
                walk->entrylen -= nbytes;

                return nbytes;
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The MAY_SLEEP test only makes sense for sync users.
                 * Async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (nbytes) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = scatterwalk_sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

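/*
 * Start walking the data of an ahash request.  Returns the number of
 * bytes available in the first chunk; the caller then alternates between
 * processing the chunk and calling crypto_hash_walk_done().
 */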
int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

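/*
 * As crypto_hash_walk_first(), but mark the walk CRYPTO_ALG_ASYNC so the
 * pages are mapped with kmap() rather than kmap_atomic(), allowing the
 * caller to sleep between chunks.
 */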
int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
                                  struct crypto_hash_walk *walk,
                                  struct scatterlist *sg, unsigned int len)
{
        walk->total = len;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
        walk->sg = sg;
        walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}

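/*
 * Copy a misaligned key into a freshly allocated aligned bounce buffer
 * before handing it to the algorithm's setkey, then wipe and free the
 * copy.
 */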
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)key & alignmask)
                return ahash_setkey_unaligned(tfm, key, keylen);

        return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look as such:
         *
         * req {
         *   .result        = ADJUSTED[new aligned buffer]
         *   .base.complete = ADJUSTED[pointer to completion function]
         *   .base.data     = ADJUSTED[*req (pointer to self)]
         *   .priv          = ADJUSTED[new priv] {
         *           .result   = ORIGINAL(result)
         *           .complete = ORIGINAL(base.complete)
         *           .data     = ORIGINAL(base.data)
         *   }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not back up req->priv here!  req->priv is for
         * internal use of the crypto API and the user must _NOT_ _EVER_
         * depend on its contents!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

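/*
 * Undo the surgery performed by ahash_save_req(): copy the digest from the
 * aligned scratch buffer into the caller's result buffer (on success),
 * restore the original callback, data and result pointers, and free the
 * private state.
 */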
static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(), thus as it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result. */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

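/*
 * Run an ahash operation whose result buffer is misaligned: divert the
 * result into an aligned scratch buffer via ahash_save_req(), invoke the
 * operation, and restore the request unless it is still in flight
 * (-EINPROGRESS, or -EBUSY with a backlogged request).
 */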
static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        ahash_restore_req(req, err);

        return err;
}

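/*
 * Common entry point for final/finup/digest: dispatch directly when the
 * result buffer is already aligned for this tfm, otherwise go through the
 * unaligned slow path above.
 */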
static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

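/*
 * Default finup implementation for drivers that only provide update and
 * final: save the request, run update, then chain into final via the
 * done1/finish1/done2 completion handlers so both synchronous and
 * asynchronous completions are handled.
 */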
static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS ||
            (err == -EBUSY && (ahash_request_flags(req) &
                               CRYPTO_TFM_REQ_MAY_BACKLOG)))
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

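/*
 * Initialise a new ahash tfm: install safe defaults for the optional ops,
 * fall back to the async shash wrapper for non-ahash algorithms, and
 * otherwise wire the ops straight through from the ahash_alg.
 */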
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;
        hash->has_setkey = false;
        hash->export = ahash_no_export;
        hash->import = ahash_no_import;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                hash->has_setkey = true;
        }
        if (alg->export)
                hash->export = alg->export;
        if (alg->import)
                hash->import = alg->import;

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_ahash_type)
                return alg->cra_ctxsize;

        return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        strncpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
                    sizeof(struct crypto_report_hash), &rhash))
                goto nla_put_failure;
        return 0;

nla_put_failure:
        return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

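/*
 * Allocate and initialise an ahash transform by algorithm name, e.g.
 * "sha256".  A rough usage sketch (illustrative only, error handling
 * abbreviated):
 *
 *      struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *      struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 my_done_cb, my_ctx);
 *      ahash_request_set_crypt(req, sgl, result, nbytes);
 *      err = crypto_ahash_digest(req);
 *      ...
 *      ahash_request_free(req);
 *      crypto_free_ahash(tfm);
 *
 * where my_done_cb, my_ctx, sgl and result are the caller's own.
 */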
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > PAGE_SIZE / 8 ||
            alg->halg.statesize > PAGE_SIZE / 8 ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");