// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ce.h"

int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ce_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	op->ce = algt->ce;

	op->enginectx.op.do_one_request = sun8i_ce_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* Allocate the software fallback used when the hardware cannot handle a request */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(tfm),
		 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

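/*
 * init/update/final/finup/export/import all delegate to the software
 * fallback: the hardware path only supports one-shot digests, so every
 * stateful operation runs on the fallback tfm allocated in crainit().
 */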
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

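/*
 * Decide whether a request must go to the software fallback. The engine
 * cannot hash an empty message, needs one spare SG slot for the padding
 * block, and requires every SG entry to be word-aligned with a length
 * that is a multiple of 4 bytes.
 */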
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct scatterlist *sg;

	if (areq->nbytes == 0)
		return true;
	/* we need to reserve one SG slot for the padding block */
	if (sg_nents(areq->src) > MAX_SG - 1)
		return true;
	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

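/*
 * digest() is the only entry point that can use the hardware: requests
 * the engine cannot handle are diverted to the software fallback, the
 * rest are assigned a flow and queued on its crypto engine.
 */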
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents(areq->src);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

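/*
 * Run one digest request on the hardware: build the CE task descriptor,
 * map the source scatterlist plus a constructed padding block, start the
 * task and copy the digest back into the request.
 */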
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	int nbw = 0;
	u64 fill, min_fill;
	__be64 *bebits;
	__le64 *lebits;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	bs = algt->alg.hash.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.halg.digestsize;
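	/*
	 * The engine writes back its full internal state, so the truncated
	 * variants get a full-size result buffer: SHA-224 is stored as
	 * SHA-256 and SHA-384 as SHA-512. Only the real digestsize bytes
	 * are copied to the caller at the end.
	 */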
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding can be up to two blocks */
	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

	flow = rctx->flow;
	chan = &ce->chanlist[flow];

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif
	dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));

	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_hash[algt->ce_algo_id];
	common |= CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);

	cet->t_sym_ctl = 0;
	cet->t_asym_ctl = 0;

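	/* Map the source scatterlist for the device and wire it into t_src */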
	nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	len = areq->nbytes;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}
	addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
	cet->t_dst[0].addr = cpu_to_le32(addr_res);
	cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
	if (dma_mapping_error(ce->dev, addr_res)) {
		dev_err(ce->dev, "DMA map dest\n");
		err = -EINVAL;
		goto theend;
	}

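	/*
	 * Build the MD5/SHA padding in buf: a 0x80 byte, zero fill up to
	 * the last block, then the message length in bits. j counts the
	 * padding in 32-bit words.
	 */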
	byte_count = areq->nbytes;
	j = 0;
	bf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
	}

	if (fill < min_fill)
		fill += bs;

	j += (fill - min_fill) / sizeof(u32);

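	/*
	 * Append the bit length: little-endian 64-bit for MD5, big-endian
	 * 64-bit for SHA-1/SHA-224/SHA-256, big-endian 128-bit (high word
	 * first) for SHA-384/SHA-512.
	 */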
	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		lebits = (__le64 *)&bf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count >> 61);
		j += 2;
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	}

	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = cpu_to_le32(addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}

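	/*
	 * t_dlen covers the payload plus the padding; depending on the
	 * variant it is expressed in bits or in 32-bit words.
	 */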
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}