// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from the marvell/cesa.c and s5p-sss.c drivers.
 */
#include <linux/device.h>
#include <asm/unaligned.h>
#include "rk3288_crypto.h"

/*
 * The IC cannot hash a zero-length message, so for an empty request we
 * copy out the precomputed hash of the empty message instead.
 */

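/*
 * The hardware DMA path assumes every scatterlist entry starts on a
 * 32-bit boundary and is a whole number of words long; anything else
 * must be handled by the software fallback.
 */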
static bool rk_ahash_need_fallback(struct ahash_request *req)
{
	struct scatterlist *sg;

	sg = req->src;
	while (sg) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return true;
		if (sg->length % 4)
			return true;
		sg = sg_next(sg);
	}
	return false;
}

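/* Hand the whole request to the software fallback transform. */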
static int rk_ahash_digest_fb(struct ahash_request *areq)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

	return crypto_ahash_digest(&rctx->fallback_req);
}

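/*
 * Return the well-known hash of the empty message; the digest size
 * tells us which algorithm this transform implements.
 */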
static int zero_message_process(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int rk_digest_size = crypto_ahash_digestsize(tfm);

	switch (rk_digest_size) {
	case SHA1_DIGEST_SIZE:
		memcpy(req->result, sha1_zero_message_hash, rk_digest_size);
		break;
	case SHA256_DIGEST_SIZE:
		memcpy(req->result, sha256_zero_message_hash, rk_digest_size);
		break;
	case MD5_DIGEST_SIZE:
		memcpy(req->result, md5_zero_message_hash, rk_digest_size);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

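/*
 * Program the hash unit for a new request: flush the hash block, clear
 * the output registers, enable and acknowledge the hash RX DMA
 * interrupts, then set the hash mode, FIFO byte swapping and the total
 * message length.
 */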
static void rk_ahash_reg_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct rk_crypto_info *dev = tctx->dev;
	int reg_status;

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) |
		     RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL);
	reg_status &= (~RK_CRYPTO_HASH_FLUSH);
	reg_status |= _SBF(0xffff, 16);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status);

	memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA |
					    RK_CRYPTO_HRDMA_DONE_ENA);

	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT |
					    RK_CRYPTO_HRDMA_DONE_INT);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode |
					       RK_CRYPTO_HASH_SWAP_DO);

	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO |
					  RK_CRYPTO_BYTESWAP_BRFIFO |
					  RK_CRYPTO_BYTESWAP_BTFIFO);

	CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes);
}

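/*
 * Only one-shot digests are offloaded to the hardware; init, update,
 * final, finup, import and export below all proxy to the software
 * fallback transform.
 */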
static int rk_ahash_init(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int rk_ahash_update(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int rk_ahash_final(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int rk_ahash_finup(struct ahash_request *req)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_rctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

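/*
 * Entry point for a one-shot digest: unaligned scatterlists go to the
 * fallback, zero-length messages are answered directly, everything
 * else is queued on the crypto engine.
 *
 * A minimal sketch of how a caller reaches this through the crypto
 * API (illustrative only; src_sgl, result and len are placeholders,
 * and error/async completion handling is omitted):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, 0, NULL, NULL);
 *	ahash_request_set_crypt(req, src_sgl, result, len);
 *	crypto_ahash_digest(req);
 */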
static int rk_ahash_digest(struct ahash_request *req)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct rk_crypto_info *dev = tctx->dev;

	if (rk_ahash_need_fallback(req))
		return rk_ahash_digest_fb(req);

	if (!req->nbytes)
		return zero_message_process(req);

	return crypto_transfer_hash_request_to_engine(dev->engine, req);
}

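/*
 * Start one DMA transfer. HRDMAL counts 32-bit words, and the upper
 * halfword of the CTRL write is the write-enable mask for the bits in
 * the lower halfword.
 */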
static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg));
	CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START |
					  (RK_CRYPTO_HASH_START << 16));
}

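/* Map the source scatterlist for DMA before the engine runs the request. */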
static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
	int ret;

	ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (ret <= 0)
		return -EINVAL;

	rctx->nrsg = ret;

	return 0;
}

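/* Undo the DMA mapping set up in rk_hash_prepare(). */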
static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);

	dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
	return 0;
}

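/*
 * Crypto-engine worker: program the hardware, feed each scatterlist
 * entry through DMA, wait for the digest to be ready and copy it out.
 */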
static int rk_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
	struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct scatterlist *sg = areq->src;
	int err = 0;
	int i;
	u32 v;

	rctx->mode = 0;

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_SHA256;
		break;
	case MD5_DIGEST_SIZE:
		rctx->mode = RK_CRYPTO_HASH_MD5;
		break;
	default:
		err = -EINVAL;
		goto theend;
	}

	rk_ahash_reg_init(areq);

	while (sg) {
		reinit_completion(&tctx->dev->complete);
		tctx->dev->status = 0;
		crypto_ahash_dma_start(tctx->dev, sg);
		wait_for_completion_interruptible_timeout(&tctx->dev->complete,
							  msecs_to_jiffies(2000));
		if (!tctx->dev->status) {
			dev_err(tctx->dev->dev, "DMA timeout\n");
			err = -EFAULT;
			goto theend;
		}
		sg = sg_next(sg);
	}

	/*
	 * The hardware needs some time to finish processing after the
	 * last DMA transfer completes. How long depends on the length
	 * of that final chunk, so a fixed delay will not do. Polling
	 * every 10us keeps this loop from wasting cycles while still
	 * reacting quickly once the digest is ready.
	 */
	while (!CRYPTO_READ(tctx->dev, RK_CRYPTO_HASH_STS))
		udelay(10);

	for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
		v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
		put_unaligned_le32(v, areq->result + i * 4);
	}

theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();

	return 0;
}

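/*
 * Per-transform setup: resolve the owning device, allocate the software
 * fallback (whose request size also determines our own) and wire up the
 * crypto-engine callbacks.
 */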
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_tmp *algt;
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	algt = container_of(alg, struct rk_crypto_tmp, alg.hash);

	tctx->dev = algt->dev;

	/* for fallback */
	tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0,
						CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback_tfm)) {
		dev_err(tctx->dev->dev, "Could not load fallback driver.\n");
		return PTR_ERR(tctx->fallback_tfm);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct rk_ahash_rctx) +
				 crypto_ahash_reqsize(tctx->fallback_tfm));

	tctx->enginectx.op.do_one_request = rk_hash_run;
	tctx->enginectx.op.prepare_request = rk_hash_prepare;
	tctx->enginectx.op.unprepare_request = rk_hash_unprepare;

	return 0;
}

static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback_tfm);
}

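/*
 * Algorithm descriptors exported to the core driver, which registers
 * them with the crypto API at probe time.
 */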
struct rk_crypto_tmp rk_ahash_sha1 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA1_DIGEST_SIZE,
			 .statesize = sizeof(struct sha1_state),
			 .base = {
				  .cra_name = "sha1",
				  .cra_driver_name = "rk-sha1",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA1_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

struct rk_crypto_tmp rk_ahash_sha256 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = SHA256_DIGEST_SIZE,
			 .statesize = sizeof(struct sha256_state),
			 .base = {
				  .cra_name = "sha256",
				  .cra_driver_name = "rk-sha256",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA256_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			 }
	}
};

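/*
 * MD5 and SHA-1 share a 64-byte block, which is why SHA1_BLOCK_SIZE is
 * used for cra_blocksize below.
 */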
struct rk_crypto_tmp rk_ahash_md5 = {
	.type = ALG_TYPE_HASH,
	.alg.hash = {
		.init = rk_ahash_init,
		.update = rk_ahash_update,
		.final = rk_ahash_final,
		.finup = rk_ahash_finup,
		.export = rk_ahash_export,
		.import = rk_ahash_import,
		.digest = rk_ahash_digest,
		.halg = {
			 .digestsize = MD5_DIGEST_SIZE,
			 .statesize = sizeof(struct md5_state),
			 .base = {
				  .cra_name = "md5",
				  .cra_driver_name = "rk-md5",
				  .cra_priority = 300,
				  .cra_flags = CRYPTO_ALG_ASYNC |
					       CRYPTO_ALG_NEED_FALLBACK,
				  .cra_blocksize = SHA1_BLOCK_SIZE,
				  .cra_ctxsize = sizeof(struct rk_ahash_ctx),
				  .cra_alignmask = 3,
				  .cra_init = rk_cra_hash_init,
				  .cra_exit = rk_cra_hash_exit,
				  .cra_module = THIS_MODULE,
				  }
			}
	}
};