/*
 * Support for Marvell's Cryptographic Engine and Security Accelerator (CESA)
 * that can be found on the following platforms: Orion, Kirkwood, Armada. This
 * driver supports the TDMA engine on platforms on which it is available.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mbus.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>

#include "cesa.h"

/* Limit of the crypto queue before reaching the backlog */
#define CESA_CRYPTO_DEFAULT_MAX_QLEN 128

static int allhwsupport = !IS_ENABLED(CONFIG_CRYPTO_DEV_MV_CESA);
module_param_named(allhwsupport, allhwsupport, int, 0444);
MODULE_PARM_DESC(allhwsupport, "Enable support for all hardware (even if it overlaps with the mv_cesa driver)");

struct mv_cesa_dev *cesa_dev;

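/*
 * Pop the next request from the engine queue. Must be called with
 * engine->lock held. *backlog is set to the first backlogged request, if
 * any, so the caller can notify its owner that it left the backlog.
 */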
struct crypto_async_request *
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
			   struct crypto_async_request **backlog)
{
	struct crypto_async_request *req;

	*backlog = crypto_get_backlog(&engine->queue);
	req = crypto_dequeue_request(&engine->queue);

	if (!req)
		return NULL;

	return req;
}

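/*
 * If the engine is idle, grab the next pending request, make it the
 * current one and launch its first processing step. A request promoted
 * from the backlog is notified with -EINPROGRESS.
 */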
static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
{
	struct crypto_async_request *req = NULL, *backlog = NULL;
	struct mv_cesa_ctx *ctx;

	spin_lock_bh(&engine->lock);
	if (!engine->req) {
		req = mv_cesa_dequeue_req_locked(engine, &backlog);
		engine->req = req;
	}
	spin_unlock_bh(&engine->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(req->tfm);
	ctx->ops->step(req);
}

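/*
 * Standard (non-DMA) processing path: let the request-specific ops check
 * the engine status. On success the request is completed and moved to the
 * complete queue; -EINPROGRESS means another processing step is needed.
 */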
static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	int res;

	req = engine->req;
	ctx = crypto_tfm_ctx(req->tfm);
	res = ctx->ops->process(req, status);

	if (res == 0) {
		ctx->ops->complete(req);
		mv_cesa_engine_enqueue_complete_request(engine, req);
	} else if (res == -EINPROGRESS) {
		ctx->ops->step(req);
	}

	return res;
}

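/*
 * Dispatch to the TDMA handler when a DMA chain is active, otherwise fall
 * back to the standard processing path.
 */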
static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
	if (engine->chain.first && engine->chain.last)
		return mv_cesa_tdma_process(engine, status);

	return mv_cesa_std_process(engine, status);
}

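/*
 * Clean up a request and invoke its completion callback. The callback is
 * run with bottom halves disabled, since crypto completion callbacks are
 * normally invoked from softirq context.
 */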
static inline void
mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
		     int res)
{
	ctx->ops->cleanup(req);
	local_bh_disable();
	req->complete(req, res);
	local_bh_enable();
}

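/*
 * Threaded interrupt handler: acknowledge and process pending interrupts
 * until none are left, rearming the engine with the next pending request
 * and flushing the complete queue on each iteration.
 */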
static irqreturn_t mv_cesa_int(int irq, void *priv)
{
	struct mv_cesa_engine *engine = priv;
	struct crypto_async_request *req;
	struct mv_cesa_ctx *ctx;
	u32 status, mask;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		int res;

		mask = mv_cesa_get_int_mask(engine);
		status = readl(engine->regs + CESA_SA_INT_STATUS);

		if (!(status & mask))
			break;

		/*
		 * TODO: avoid clearing the FPGA_INT_STATUS if this is not
		 * relevant on some platforms.
		 */
		writel(~status, engine->regs + CESA_SA_FPGA_INT_STATUS);
		writel(~status, engine->regs + CESA_SA_INT_STATUS);

		/* Process fetched requests */
		res = mv_cesa_int_process(engine, status & mask);
		ret = IRQ_HANDLED;

		spin_lock_bh(&engine->lock);
		req = engine->req;
		if (res != -EINPROGRESS)
			engine->req = NULL;
		spin_unlock_bh(&engine->lock);

		ctx = crypto_tfm_ctx(req->tfm);

		if (res && res != -EINPROGRESS)
			mv_cesa_complete_req(ctx, req, res);

		/* Launch the next pending request */
		mv_cesa_rearm_engine(engine);

		/* Iterate over the complete queue */
		while (true) {
			req = mv_cesa_engine_dequeue_complete_request(engine);
			if (!req)
				break;

			ctx = crypto_tfm_ctx(req->tfm);
			mv_cesa_complete_req(ctx, req, 0);
		}
	}

	return ret;
}

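/*
 * Enqueue a request on its target engine. For DMA-capable requests that
 * were accepted (or backlogged), build/extend the TDMA chain while still
 * holding the engine lock, then try to start the engine if it is idle.
 */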
int mv_cesa_queue_req(struct crypto_async_request *req,
		      struct mv_cesa_req *creq)
{
	int ret;
	struct mv_cesa_engine *engine = creq->engine;

	spin_lock_bh(&engine->lock);
	ret = crypto_enqueue_request(&engine->queue, req);
	if ((mv_cesa_req_get_type(creq) == CESA_DMA_REQ) &&
	    (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))
		mv_cesa_tdma_chain(engine, creq);
	spin_unlock_bh(&engine->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mv_cesa_rearm_engine(engine);

	return -EINPROGRESS;
}

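/*
 * Register all cipher and ahash algorithms supported by this variant of
 * the IP. On failure, unregister everything registered so far.
 */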
static int mv_cesa_add_algs(struct mv_cesa_dev *cesa)
{
	int ret;
	int i, j;

	for (i = 0; i < cesa->caps->ncipher_algs; i++) {
		ret = crypto_register_alg(cesa->caps->cipher_algs[i]);
		if (ret)
			goto err_unregister_crypto;
	}

	for (i = 0; i < cesa->caps->nahash_algs; i++) {
		ret = crypto_register_ahash(cesa->caps->ahash_algs[i]);
		if (ret)
			goto err_unregister_ahash;
	}

	return 0;

err_unregister_ahash:
	for (j = 0; j < i; j++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[j]);
	i = cesa->caps->ncipher_algs;

err_unregister_crypto:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(cesa->caps->cipher_algs[j]);

	return ret;
}

static void mv_cesa_remove_algs(struct mv_cesa_dev *cesa)
{
	int i;

	for (i = 0; i < cesa->caps->nahash_algs; i++)
		crypto_unregister_ahash(cesa->caps->ahash_algs[i]);

	for (i = 0; i < cesa->caps->ncipher_algs; i++)
		crypto_unregister_alg(cesa->caps->cipher_algs[i]);
}

static struct crypto_alg *orion_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *orion_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
};

static struct crypto_alg *armada_370_cipher_algs[] = {
	&mv_cesa_ecb_des_alg,
	&mv_cesa_cbc_des_alg,
	&mv_cesa_ecb_des3_ede_alg,
	&mv_cesa_cbc_des3_ede_alg,
	&mv_cesa_ecb_aes_alg,
	&mv_cesa_cbc_aes_alg,
};

static struct ahash_alg *armada_370_ahash_algs[] = {
	&mv_md5_alg,
	&mv_sha1_alg,
	&mv_sha256_alg,
	&mv_ahmac_md5_alg,
	&mv_ahmac_sha1_alg,
	&mv_ahmac_sha256_alg,
};

static const struct mv_cesa_caps orion_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = false,
};

static const struct mv_cesa_caps kirkwood_caps = {
	.nengines = 1,
	.cipher_algs = orion_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(orion_cipher_algs),
	.ahash_algs = orion_ahash_algs,
	.nahash_algs = ARRAY_SIZE(orion_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_370_caps = {
	.nengines = 1,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct mv_cesa_caps armada_xp_caps = {
	.nengines = 2,
	.cipher_algs = armada_370_cipher_algs,
	.ncipher_algs = ARRAY_SIZE(armada_370_cipher_algs),
	.ahash_algs = armada_370_ahash_algs,
	.nahash_algs = ARRAY_SIZE(armada_370_ahash_algs),
	.has_tdma = true,
};

static const struct of_device_id mv_cesa_of_match_table[] = {
	{ .compatible = "marvell,orion-crypto", .data = &orion_caps },
	{ .compatible = "marvell,kirkwood-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,dove-crypto", .data = &kirkwood_caps },
	{ .compatible = "marvell,armada-370-crypto", .data = &armada_370_caps },
	{ .compatible = "marvell,armada-xp-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-375-crypto", .data = &armada_xp_caps },
	{ .compatible = "marvell,armada-38x-crypto", .data = &armada_xp_caps },
	{}
};
MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);

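/*
 * Program the TDMA address decoding windows so the engine can reach DRAM:
 * clear all four windows first, then open one window per DRAM chip select.
 */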
static void
mv_cesa_conf_mbus_windows(struct mv_cesa_engine *engine,
			  const struct mbus_dram_target_info *dram)
{
	void __iomem *iobase = engine->regs;
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(0, iobase + CESA_TDMA_WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       iobase + CESA_TDMA_WINDOW_CTRL(i));
		writel(cs->base, iobase + CESA_TDMA_WINDOW_BASE(i));
	}
}

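/*
 * Allocate the managed DMA pools (TDMA descriptors, operation contexts,
 * hash cache and padding buffers) used by the TDMA engine. This is a
 * no-op on variants without TDMA support.
 */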
static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
{
	struct device *dev = cesa->dev;
	struct mv_cesa_dev_dma *dma;

	if (!cesa->caps->has_tdma)
		return 0;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->tdma_desc_pool = dmam_pool_create("tdma_desc", dev,
					sizeof(struct mv_cesa_tdma_desc),
					16, 0);
	if (!dma->tdma_desc_pool)
		return -ENOMEM;

	dma->op_pool = dmam_pool_create("cesa_op", dev,
					sizeof(struct mv_cesa_op_ctx), 16, 0);
	if (!dma->op_pool)
		return -ENOMEM;

	dma->cache_pool = dmam_pool_create("cesa_cache", dev,
					   CESA_MAX_HASH_BLOCK_SIZE, 1, 0);
	if (!dma->cache_pool)
		return -ENOMEM;

	dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
	if (!dma->padding_pool)
		return -ENOMEM;

	cesa->dma = dma;

	return 0;
}

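/*
 * Reserve the SRAM used by an engine: prefer an SRAM region exposed
 * through the "marvell,crypto-srams" genalloc pool, and fall back to a
 * memory resource named "sram" (or "sramN" on multi-engine variants).
 */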
static int mv_cesa_get_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];
	const char *res_name = "sram";
	struct resource *res;

	engine->pool = of_gen_pool_get(cesa->dev->of_node,
				       "marvell,crypto-srams", idx);
	if (engine->pool) {
		engine->sram = gen_pool_dma_alloc(engine->pool,
						  cesa->sram_size,
						  &engine->sram_dma);
		if (engine->sram)
			return 0;

		engine->pool = NULL;
		return -ENOMEM;
	}

	if (cesa->caps->nengines > 1) {
		if (!idx)
			res_name = "sram0";
		else
			res_name = "sram1";
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   res_name);
	if (!res || resource_size(res) < cesa->sram_size)
		return -EINVAL;

	engine->sram = devm_ioremap_resource(cesa->dev, res);
	if (IS_ERR(engine->sram))
		return PTR_ERR(engine->sram);

	engine->sram_dma = phys_to_dma(cesa->dev,
				       (phys_addr_t)res->start);

	return 0;
}

static void mv_cesa_put_sram(struct platform_device *pdev, int idx)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	struct mv_cesa_engine *engine = &cesa->engines[idx];

	if (!engine->pool)
		return;

	gen_pool_free(engine->pool, (unsigned long)engine->sram,
		      cesa->sram_size);
}

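/*
 * Probe the CESA block: match the SoC-specific capabilities, map the
 * registers, set up DMA pools, then initialize each engine (SRAM, clocks,
 * mbus windows, IRQ and crypto queue) before registering the algorithms.
 */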
static int mv_cesa_probe(struct platform_device *pdev)
{
	const struct mv_cesa_caps *caps = &orion_caps;
	const struct mbus_dram_target_info *dram;
	const struct of_device_id *match;
	struct device *dev = &pdev->dev;
	struct mv_cesa_dev *cesa;
	struct mv_cesa_engine *engines;
	struct resource *res;
	int irq, ret, i;
	u32 sram_size;

	if (cesa_dev) {
		dev_err(&pdev->dev, "Only one CESA device authorized\n");
		return -EEXIST;
	}

	if (dev->of_node) {
		match = of_match_node(mv_cesa_of_match_table, dev->of_node);
		if (!match || !match->data)
			return -ENOTSUPP;

		caps = match->data;
	}

	if ((caps == &orion_caps || caps == &kirkwood_caps) && !allhwsupport)
		return -ENOTSUPP;

	cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
	if (!cesa)
		return -ENOMEM;

	cesa->caps = caps;
	cesa->dev = dev;

	sram_size = CESA_SA_DEFAULT_SRAM_SIZE;
	of_property_read_u32(cesa->dev->of_node, "marvell,crypto-sram-size",
			     &sram_size);
	if (sram_size < CESA_SA_MIN_SRAM_SIZE)
		sram_size = CESA_SA_MIN_SRAM_SIZE;

	cesa->sram_size = sram_size;
	cesa->engines = devm_kzalloc(dev, caps->nengines * sizeof(*engines),
				     GFP_KERNEL);
	if (!cesa->engines)
		return -ENOMEM;

	spin_lock_init(&cesa->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	cesa->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(cesa->regs))
		return PTR_ERR(cesa->regs);

	ret = mv_cesa_dev_dma_init(cesa);
	if (ret)
		return ret;

	dram = mv_mbus_dram_info_nooverlap();

	platform_set_drvdata(pdev, cesa);

	for (i = 0; i < caps->nengines; i++) {
		struct mv_cesa_engine *engine = &cesa->engines[i];
		char res_name[7];

		engine->id = i;
		spin_lock_init(&engine->lock);

		ret = mv_cesa_get_sram(pdev, i);
		if (ret)
			goto err_cleanup;

		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto err_cleanup;
		}

		/*
		 * Not all platforms can gate the CESA clocks: do not complain
		 * if the clock does not exist.
		 */
		snprintf(res_name, sizeof(res_name), "cesa%d", i);
		engine->clk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->clk)) {
			engine->clk = devm_clk_get(dev, NULL);
			if (IS_ERR(engine->clk))
				engine->clk = NULL;
		}

		snprintf(res_name, sizeof(res_name), "cesaz%d", i);
		engine->zclk = devm_clk_get(dev, res_name);
		if (IS_ERR(engine->zclk))
			engine->zclk = NULL;

		ret = clk_prepare_enable(engine->clk);
		if (ret)
			goto err_cleanup;

		ret = clk_prepare_enable(engine->zclk);
		if (ret)
			goto err_cleanup;

		engine->regs = cesa->regs + CESA_ENGINE_OFF(i);

		if (dram && cesa->caps->has_tdma)
			mv_cesa_conf_mbus_windows(engine, dram);

		writel(0, engine->regs + CESA_SA_INT_STATUS);
		writel(CESA_SA_CFG_STOP_DIG_ERR,
		       engine->regs + CESA_SA_CFG);
		writel(engine->sram_dma & CESA_SA_SRAM_MSK,
		       engine->regs + CESA_SA_DESC_P0);

		ret = devm_request_threaded_irq(dev, irq, NULL, mv_cesa_int,
						IRQF_ONESHOT,
						dev_name(&pdev->dev),
						engine);
		if (ret)
			goto err_cleanup;

		crypto_init_queue(&engine->queue, CESA_CRYPTO_DEFAULT_MAX_QLEN);
		atomic_set(&engine->load, 0);
		INIT_LIST_HEAD(&engine->complete_queue);
	}

	cesa_dev = cesa;

	ret = mv_cesa_add_algs(cesa);
	if (ret) {
		cesa_dev = NULL;
		goto err_cleanup;
	}

	dev_info(dev, "CESA device successfully registered\n");

	return 0;

err_cleanup:
	for (i = 0; i < caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return ret;
}

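/*
 * Tear down in reverse probe order: unregister the algorithms, then
 * release per-engine clocks and SRAM.
 */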
static int mv_cesa_remove(struct platform_device *pdev)
{
	struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
	int i;

	mv_cesa_remove_algs(cesa);

	for (i = 0; i < cesa->caps->nengines; i++) {
		clk_disable_unprepare(cesa->engines[i].zclk);
		clk_disable_unprepare(cesa->engines[i].clk);
		mv_cesa_put_sram(pdev, i);
	}

	return 0;
}

static struct platform_driver marvell_cesa = {
	.probe		= mv_cesa_probe,
	.remove		= mv_cesa_remove,
	.driver		= {
		.name	= "marvell-cesa",
		.of_match_table = mv_cesa_of_match_table,
	},
};
module_platform_driver(marvell_cesa);

MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
MODULE_LICENSE("GPL v2");