// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-hash.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include "sun8i-ce.h"

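/*
 * Allocate a software fallback ahash with the same algorithm name, grow
 * statesize/reqsize so the fallback state and request fit, and take a
 * runtime PM reference so the device stays powered for the lifetime of
 * the transform.
 */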
int sun8i_ce_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ce_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ce_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	op->ce = algt->ce;

	op->enginectx.op.do_one_request = sun8i_ce_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ce->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ce_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(tfm),
		 crypto_tfm_alg_driver_name(&op->fallback_tfm->base));
	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

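/* Release the fallback transform and drop the runtime PM reference. */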
void sun8i_ce_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ce->dev);
}

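/*
 * init/update/final/finup/export/import are always delegated to the
 * software fallback; only complete digest() requests are offloaded to
 * the hardware (see sun8i_ce_hash_digest()).
 */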
int sun8i_ce_hash_init(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ce_hash_final(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ce_hash_update(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ce_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

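/* Hand a complete digest request over to the software fallback. */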
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}

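/*
 * The hardware path is only usable when there is data to hash, when the
 * source scatterlist fits in the task descriptor (one slot is reserved
 * for the padding), and when every SG entry has a 32-bit aligned offset
 * and a length that is a multiple of 4 bytes.
 */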
static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
{
	struct scatterlist *sg;

	if (areq->nbytes == 0)
		return true;
	/* we need to reserve one SG slot for the padding */
	if (sg_nents(areq->src) > MAX_SG - 1)
		return true;
	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}

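/*
 * digest() entry point: fall back to software when the request cannot
 * be handled by the hardware, otherwise pick a flow and queue the
 * request on that flow's crypto engine.
 */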
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct crypto_engine *engine;
	struct scatterlist *sg;
	int nr_sgs, e, i;

	if (sun8i_ce_hash_need_fallback(areq))
		return sun8i_ce_hash_digest_fb(areq);

	nr_sgs = sg_nents(areq->src);
	if (nr_sgs > MAX_SG - 1)
		return sun8i_ce_hash_digest_fb(areq);

	for_each_sg(areq->src, sg, nr_sgs, i) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return sun8i_ce_hash_digest_fb(areq);
	}

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

	e = sun8i_ce_get_engine_number(ce);
	rctx->flow = e;
	engine = ce->chanlist[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}

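/*
 * sun8i_ce_hash_run() is called by the crypto engine with exclusive use
 * of one flow. It builds a single CE task out of the source scatterlist,
 * a separately allocated padding buffer and a DMA buffer for the digest,
 * then runs the task and copies the digest back into the request.
 */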
int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_dev *ce;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	int nr_sgs, flow, err;
	unsigned int len;
	u32 common;
	u64 byte_count;
	__le32 *bf;
	void *buf = NULL;
	int j, i, todo;
	int nbw = 0;
	u64 fill, min_fill;
	__be64 *bebits;
	__le64 *lebits;
	void *result = NULL;
	u64 bs;
	int digestsize;
	dma_addr_t addr_res, addr_pad;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash);
	ce = algt->ce;

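	/*
	 * SHA224 and SHA384 are truncated variants: allocate and map the
	 * full SHA256/SHA512-sized result buffer for the hardware; only
	 * halg.digestsize bytes are copied to areq->result at the end.
	 */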
	bs = algt->alg.hash.halg.base.cra_blocksize;
	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;
	if (digestsize == SHA384_DIGEST_SIZE)
		digestsize = SHA512_DIGEST_SIZE;

	/* the padding could be up to two blocks. */
	buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA);
	if (!buf) {
		err = -ENOMEM;
		goto theend;
	}
	bf = (__le32 *)buf;

	result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
	if (!result) {
		err = -ENOMEM;
		goto theend;
	}

299
300 flow = rctx->flow;
301 chan = &ce->chanlist[flow];
302
303 #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
304 algt->stat_req++;
305 #endif
306 dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
307
308 cet = chan->tl;
309 memset(cet, 0, sizeof(struct ce_task));
310
311 cet->t_id = cpu_to_le32(flow);
312 common = ce->variant->alg_hash[algt->ce_algo_id];
313 common |= CE_COMM_INT;
314 cet->t_common_ctl = cpu_to_le32(common);
315
316 cet->t_sym_ctl = 0;
317 cet->t_asym_ctl = 0;
318
319 nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
320 if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
321 dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
322 err = -EINVAL;
323 goto theend;
324 }
325
326 len = areq->nbytes;
327 for_each_sg(areq->src, sg, nr_sgs, i) {
328 cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
329 todo = min(len, sg_dma_len(sg));
330 cet->t_src[i].len = cpu_to_le32(todo / 4);
331 len -= todo;
332 }
333 if (len > 0) {
334 dev_err(ce->dev, "remaining len %d\n", len);
335 err = -EINVAL;
336 goto theend;
337 }
338 addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
339 cet->t_dst[0].addr = cpu_to_le32(addr_res);
340 cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
341 if (dma_mapping_error(ce->dev, addr_res)) {
342 dev_err(ce->dev, "DMA map dest\n");
343 err = -EINVAL;
344 goto theend;
345 }
346
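	/*
	 * Build the MD5/SHA padding by hand in the pad buffer: a 0x80
	 * byte, zero fill up to the end of the last block, then the
	 * message length in bits (little-endian 64-bit for MD5,
	 * big-endian 64-bit for SHA1/SHA224/SHA256, big-endian 128-bit
	 * for SHA384/SHA512).
	 */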
	byte_count = areq->nbytes;
	j = 0;
	bf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + (nbw ? 0 : sizeof(u32));
	}

	if (fill < min_fill)
		fill += bs;

	j += (fill - min_fill) / sizeof(u32);

	switch (algt->ce_algo_id) {
	case CE_ID_HASH_MD5:
		lebits = (__le64 *)&bf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
		break;
	case CE_ID_HASH_SHA1:
	case CE_ID_HASH_SHA224:
	case CE_ID_HASH_SHA256:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	case CE_ID_HASH_SHA384:
	case CE_ID_HASH_SHA512:
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count >> 61);
		j += 2;
		bebits = (__be64 *)&bf[j];
		*bebits = cpu_to_be64(byte_count << 3);
		j += 2;
		break;
	}

	addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
	cet->t_src[i].addr = cpu_to_le32(addr_pad);
	cet->t_src[i].len = cpu_to_le32(j);
	if (dma_mapping_error(ce->dev, addr_pad)) {
		dev_err(ce->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto theend;
	}

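	/*
	 * t_dlen covers the source data plus the padding; depending on
	 * the CE variant it is expressed either in bits or in 32-bit
	 * words.
	 */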
	if (ce->variant->hash_t_dlen_in_bits)
		cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8);
	else
		cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);

	chan->timeout = areq->nbytes;

	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(areq->base.tfm));

	dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
	dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
	dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);

	memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	kfree(buf);
	kfree(result);
	crypto_finalize_hash_request(engine, breq, err);
	return 0;
}