/*
 * Cryptographic API.
 *
 * Digest operations.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>

#include "internal.h"

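/* init() - reset the digest state via the algorithm's dia_init hook. */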
static int init(struct hash_desc *desc)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);

	tfm->__crt_alg->cra_digest.dia_init(tfm);
	return 0;
}

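/*
 * update2() - feed scatterlist data into the digest.  Each entry is
 * hashed page by page; when the offset violates the algorithm's
 * alignment mask, the bytes up to the next aligned boundary are
 * hashed separately so the remainder of the page is passed in aligned.
 */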
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page = min(l, ((unsigned int)
							   (PAGE_SIZE)) -
							   offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			if (unlikely(offset & alignmask)) {
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);
				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;
		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}

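/*
 * update() - synchronous update entry point; warns and bails out if
 * called from hard IRQ context.
 */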
static int update(struct hash_desc *desc,
		  struct scatterlist *sg, unsigned int nbytes)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;
	return update2(desc, sg, nbytes);
}

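/*
 * final() - produce the digest.  If the caller's output buffer is not
 * aligned for this algorithm, finalise into an aligned scratch area
 * placed after the tfm context and copy the result out.
 */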
static int final(struct hash_desc *desc, u8 *out)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	struct digest_alg *digest = &tfm->__crt_alg->cra_digest;

	if (unlikely((unsigned long)out & alignmask)) {
		unsigned long align = alignmask + 1;
		unsigned long addr = (unsigned long)crypto_tfm_ctx(tfm);
		u8 *dst = (u8 *)ALIGN(addr, align) +
			  ALIGN(tfm->__crt_alg->cra_ctxsize, align);

		digest->dia_final(tfm, dst);
		memcpy(out, dst, digest->dia_digestsize);
	} else
		digest->dia_final(tfm, out);

	return 0;
}

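/* nosetkey() - stub setkey for digest algorithms that take no key. */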
static int nosetkey(struct crypto_hash *tfm, const u8 *key, unsigned int keylen)
{
	crypto_hash_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
	return -ENOSYS;
}

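/* setkey() - forward the key to the algorithm's dia_setkey hook. */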
static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(hash);

	crypto_hash_clear_flags(hash, CRYPTO_TFM_RES_MASK);
	return tfm->__crt_alg->cra_digest.dia_setkey(tfm, key, keylen);
}

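/* digest() - one-shot init + update + final over a scatterlist. */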
static int digest(struct hash_desc *desc,
		  struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	init(desc);
	update2(desc, sg, nbytes);
	return final(desc, out);
}

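/*
 * crypto_init_digest_ops() - wire a digest algorithm's callbacks into
 * the synchronous crypto_hash interface.
 */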
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
	struct hash_tfm *ops = &tfm->crt_hash;
	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;

	if (dalg->dia_digestsize > PAGE_SIZE / 8)
		return -EINVAL;

	ops->init	= init;
	ops->update	= update;
	ops->final	= final;
	ops->digest	= digest;
	ops->setkey	= dalg->dia_setkey ? setkey : nosetkey;
	ops->digestsize	= dalg->dia_digestsize;

	return 0;
}

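/* crypto_exit_digest_ops() - nothing to tear down for digest ops. */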
void crypto_exit_digest_ops(struct crypto_tfm *tfm)
{
}

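/* digest_async_nosetkey() - async stub setkey for keyless digest algorithms. */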
static int digest_async_nosetkey(struct crypto_ahash *tfm_async, const u8 *key,
			unsigned int keylen)
{
	crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
	return -ENOSYS;
}

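/* digest_async_setkey() - forward the key to the algorithm's dia_setkey hook. */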
static int digest_async_setkey(struct crypto_ahash *tfm_async, const u8 *key,
			unsigned int keylen)
{
	struct crypto_tfm    *tfm        = crypto_ahash_tfm(tfm_async);
	struct digest_alg    *dalg       = &tfm->__crt_alg->cra_digest;

	crypto_ahash_clear_flags(tfm_async, CRYPTO_TFM_RES_MASK);
	return dalg->dia_setkey(tfm, key, keylen);
}

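/* digest_async_init() - reset the digest state via dia_init. */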
static int digest_async_init(struct ahash_request *req)
{
	struct crypto_tfm *tfm  = req->base.tfm;
	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;

	dalg->dia_init(tfm);
	return 0;
}

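/*
 * digest_async_update() - build a temporary hash_desc and reuse the
 * synchronous update() path.
 */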
static int digest_async_update(struct ahash_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct hash_desc  desc = {
		.tfm   = __crypto_hash_cast(tfm),
		.flags = req->base.flags,
	};

	update(&desc, req->src, req->nbytes);
	return 0;
}

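/* digest_async_final() - reuse the synchronous final() path. */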
static int digest_async_final(struct ahash_request *req)
{
	struct crypto_tfm *tfm  = req->base.tfm;
	struct hash_desc  desc = {
		.tfm   = __crypto_hash_cast(tfm),
		.flags = req->base.flags,
	};

	final(&desc, req->result);
	return 0;
}

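/* digest_async_digest() - one-shot digest via the synchronous digest() path. */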
static int digest_async_digest(struct ahash_request *req)
{
	struct crypto_tfm *tfm  = req->base.tfm;
	struct hash_desc  desc = {
		.tfm   = __crypto_hash_cast(tfm),
		.flags = req->base.flags,
	};

	return digest(&desc, req->src, req->nbytes, req->result);
}

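/*
 * crypto_init_digest_ops_async() - wire a digest algorithm's callbacks
 * into the asynchronous crypto_ahash interface.
 */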
int crypto_init_digest_ops_async(struct crypto_tfm *tfm)
{
	struct ahash_tfm  *crt  = &tfm->crt_ahash;
	struct digest_alg *dalg = &tfm->__crt_alg->cra_digest;

	if (dalg->dia_digestsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->init       = digest_async_init;
	crt->update     = digest_async_update;
	crt->final      = digest_async_final;
	crt->digest     = digest_async_digest;
	crt->setkey     = dalg->dia_setkey ? digest_async_setkey :
						digest_async_nosetkey;
	crt->digestsize = dalg->dia_digestsize;

	return 0;
}