// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ce-cipher.c - hardware cryptographic offloader for
 * Allwinner H3/A64/H5/H2+/H6/R40 SoC
 *
 * Copyright (C) 2016-2019 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 *
 * You can find a link to the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include "sun8i-ce.h"
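
/*
 * sun8i_ce_cipher_need_fallback() - check whether a request must be
 * handled by the software fallback instead of the CE hardware.
 *
 * The fallback is used for requests the hardware cannot process directly:
 * more than MAX_SG scatterlist entries, a length that is zero, shorter
 * than the IV or not a multiple of the AES block size, or scatterlist
 * entries that are not word-sized and word-aligned.
 */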
static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct scatterlist *sg;

	if (sg_nents(areq->src) > MAX_SG || sg_nents(areq->dst) > MAX_SG)
		return true;

	if (areq->cryptlen < crypto_skcipher_ivsize(tfm))
		return true;

	if (areq->cryptlen == 0 || areq->cryptlen % 16)
		return true;

	sg = areq->src;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	sg = areq->dst;
	while (sg) {
		if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		sg = sg_next(sg);
	}
	return false;
}
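
/* Process the request with the software fallback skcipher. */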
static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	int err;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	algt->stat_fb++;
#endif

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->op_dir & CE_DECRYPTION)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}
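
/*
 * sun8i_ce_cipher_prepare() - build the CE task descriptor for a request.
 *
 * This fills the ce_task of the chosen flow with the algorithm, direction,
 * key, IV and source/destination scatterlists, DMA-mapping every buffer
 * the hardware will touch. sun8i_ce_cipher_unprepare() releases the
 * mappings once the task has run.
 */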
static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun8i_ce_alg_template *algt;
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	struct scatterlist *sg;
	unsigned int todo, len, offset, ivsize;
	u32 common, sym;
	int flow, i;
	int nr_sgs = 0;
	int nr_sgd = 0;
	int err = 0;

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);

	dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__,
		crypto_tfm_alg_name(areq->base.tfm),
		areq->cryptlen,
		rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm),
		op->keylen);

#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
	algt->stat_req++;
#endif

	flow = rctx->flow;

	chan = &ce->chanlist[flow];

	cet = chan->tl;
	memset(cet, 0, sizeof(struct ce_task));
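
	/* Program the common control fields: flow id, algorithm, direction. */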
	cet->t_id = cpu_to_le32(flow);
	common = ce->variant->alg_cipher[algt->ce_algo_id];
	common |= rctx->op_dir | CE_COMM_INT;
	cet->t_common_ctl = cpu_to_le32(common);
	/* CTS and recent CE (H6) need the length in bytes, in words otherwise */
	if (ce->variant->cipher_t_dlen_in_bytes)
		cet->t_dlen = cpu_to_le32(areq->cryptlen);
	else
		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);

	sym = ce->variant->op_mode[algt->ce_blockmode];
	len = op->keylen;
	switch (len) {
	case 128 / 8:
		sym |= CE_AES_128BITS;
		break;
	case 192 / 8:
		sym |= CE_AES_192BITS;
		break;
	case 256 / 8:
		sym |= CE_AES_256BITS;
		break;
	}

	cet->t_sym_ctl = cpu_to_le32(sym);
	cet->t_asym_ctl = 0;
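
	/* The key must be DMA-mapped so the engine can fetch it. */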
	rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(ce->dev, rctx->addr_key)) {
		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
		err = -EFAULT;
		goto theend;
	}
	cet->t_key = cpu_to_le32(rctx->addr_key);
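
	/*
	 * The IV is copied into a DMA-able bounce buffer. For decryption,
	 * the last ciphertext block of the source is saved first, so it can
	 * be handed back as the next IV even after the destination buffer
	 * has been written.
	 */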
	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && ivsize > 0) {
		rctx->ivlen = ivsize;
		rctx->bounce_iv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
		if (!rctx->bounce_iv) {
			err = -ENOMEM;
			goto theend_key;
		}
		if (rctx->op_dir & CE_DECRYPTION) {
			rctx->backup_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!rctx->backup_iv) {
				err = -ENOMEM;
				/* theend_key is jumped past the bounce buffer
				 * cleanup, so free it here to avoid a leak */
				kfree(rctx->bounce_iv);
				goto theend_key;
			}
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
		memcpy(rctx->bounce_iv, areq->iv, ivsize);
		rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, rctx->ivlen,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
			dev_err(ce->dev, "Cannot DMA MAP IV\n");
			err = -ENOMEM;
			goto theend_iv;
		}
		cet->t_iv = cpu_to_le32(rctx->addr_iv);
	}
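
	/* DMA-map the source and destination scatterlists. */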
	if (areq->src == areq->dst) {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_BIDIRECTIONAL);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = nr_sgs;
	} else {
		nr_sgs = dma_map_sg(ce->dev, areq->src, sg_nents(areq->src),
				    DMA_TO_DEVICE);
		if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
			err = -EINVAL;
			goto theend_iv;
		}
		nr_sgd = dma_map_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				    DMA_FROM_DEVICE);
		if (nr_sgd <= 0 || nr_sgd > MAX_SG) {
			dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd);
			err = -EINVAL;
			goto theend_sgs;
		}
	}
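
	/*
	 * Fill the task descriptor with the mapped source and destination
	 * entries; the hardware takes lengths in 32-bit words.
	 */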
	len = areq->cryptlen;
	for_each_sg(areq->src, sg, nr_sgs, i) {
		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_src[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	len = areq->cryptlen;
	for_each_sg(areq->dst, sg, nr_sgd, i) {
		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
		todo = min(len, sg_dma_len(sg));
		cet->t_dst[i].len = cpu_to_le32(todo / 4);
		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
		len -= todo;
	}
	if (len > 0) {
		dev_err(ce->dev, "remaining len %u\n", len);
		err = -EINVAL;
		goto theend_sgs;
	}

	chan->timeout = areq->cryptlen;
	rctx->nr_sgs = nr_sgs;
	rctx->nr_sgd = nr_sgd;
	return 0;
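
/*
 * Error unwinding: each label releases what was set up before the point of
 * failure, falling through to the earlier stages.
 */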
theend_sgs:
	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
			     DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, sg_nents(areq->src),
				     DMA_TO_DEVICE);
		if (nr_sgd > 0)
			dma_unmap_sg(ce->dev, areq->dst, sg_nents(areq->dst),
				     DMA_FROM_DEVICE);
	}

theend_iv:
	if (areq->iv && ivsize > 0) {
		if (rctx->addr_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

theend_key:
	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

theend:
	return err;
}
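
/*
 * sun8i_ce_cipher_run() - run the prepared task on its flow and complete
 * the request with the result.
 */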
static int sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
	int flow, err;

	flow = rctx->flow;
	err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, breq, err);
	local_bh_enable();
	return 0;
}
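
/*
 * sun8i_ce_cipher_unprepare() - release the DMA mappings and IV buffers
 * set up by sun8i_ce_cipher_prepare() once the task has completed.
 */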
static int sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct sun8i_ce_flow *chan;
	struct ce_task *cet;
	unsigned int ivsize, offset;
	int nr_sgs = rctx->nr_sgs;
	int nr_sgd = rctx->nr_sgd;
	int flow;

	flow = rctx->flow;
	chan = &ce->chanlist[flow];
	cet = chan->tl;
	ivsize = crypto_skcipher_ivsize(tfm);

	if (areq->src == areq->dst) {
		dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL);
	} else {
		if (nr_sgs > 0)
			dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
		dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE);
	}

	if (areq->iv && ivsize > 0) {
		if (cet->t_iv)
			dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE);
		offset = areq->cryptlen - ivsize;
		if (rctx->op_dir & CE_DECRYPTION) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			kfree_sensitive(rctx->backup_iv);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
		kfree(rctx->bounce_iv);
	}

	dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);

	return 0;
}
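
/* Queue a decryption request on a flow, or use the fallback if needed. */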
int sun8i_ce_skdecrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_DECRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
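
/* Queue an encryption request on a flow, or use the fallback if needed. */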
int sun8i_ce_skencrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
	struct crypto_engine *engine;
	int e;

	rctx->op_dir = CE_ENCRYPTION;
	if (sun8i_ce_cipher_need_fallback(areq))
		return sun8i_ce_cipher_fallback(areq);

	e = sun8i_ce_get_engine_number(op->ce);
	rctx->flow = e;
	engine = op->ce->chanlist[e].engine;

	return crypto_transfer_skcipher_request_to_engine(engine, areq);
}
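
/*
 * sun8i_ce_cipher_init() - allocate the software fallback and register
 * the crypto_engine callbacks for this transform.
 */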
int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun8i_ce_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(sktfm);
	int err;

	memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx));

	algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher);
	op->ce = algt->ce;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
			 crypto_skcipher_reqsize(op->fallback_tfm);

	dev_info(op->ce->dev, "Fallback for %s is %s\n",
		 crypto_tfm_alg_driver_name(&sktfm->base),
		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)));

	op->enginectx.op.do_one_request = sun8i_ce_cipher_run;
	op->enginectx.op.prepare_request = sun8i_ce_cipher_prepare;
	op->enginectx.op.unprepare_request = sun8i_ce_cipher_unprepare;

	err = pm_runtime_get_sync(op->ce->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	pm_runtime_put_noidle(op->ce->dev);
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}
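
/* Free the key and fallback transform, then release the device. */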
void sun8i_ce_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm);

	kfree_sensitive(op->key);
	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put_sync_suspend(op->ce->dev);
}
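
/*
 * sun8i_ce_aes_setkey() - validate and store an AES key, mirroring it into
 * the fallback transform. The key is kept in a DMA-able buffer since the
 * hardware fetches it directly.
 */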
int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun8i_ce_dev *ce = op->ce;

	switch (keylen) {
	case 128 / 8:
	case 192 / 8:
	case 256 / 8:
		break;
	default:
		dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
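
/*
 * sun8i_ce_des3_setkey() - validate and store a DES3 key, mirroring it
 * into the fallback transform.
 */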
int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	kfree_sensitive(op->key);
	op->keylen = keylen;
	op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
	if (!op->key)
		return -ENOMEM;

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}