1 /*
2  * Support for Marvell's crypto engine which can be found on some Orion5X
3  * boards.
4  *
5  * Author: Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
6  * License: GPLv2
7  *
8  */
9 #include <crypto/aes.h>
10 #include <crypto/algapi.h>
11 #include <linux/crypto.h>
12 #include <linux/genalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/io.h>
15 #include <linux/kthread.h>
16 #include <linux/platform_device.h>
17 #include <linux/scatterlist.h>
18 #include <linux/slab.h>
19 #include <linux/module.h>
20 #include <linux/clk.h>
21 #include <crypto/internal/hash.h>
22 #include <crypto/sha.h>
23 #include <linux/of.h>
24 #include <linux/of_platform.h>
25 #include <linux/of_irq.h>
26 
27 #include "mv_cesa.h"
28 
29 #define MV_CESA	"MV-CESA:"
30 #define MAX_HW_HASH_SIZE	0xFFFF
31 #define MV_CESA_EXPIRE		500 /* msec */
32 
33 #define MV_CESA_DEFAULT_SRAM_SIZE	2048
34 
35 /*
36  * STM:
37  *   /---------------------------------------\
38  *   |                                       | request complete
39  *  \./                                      |
40  * IDLE -> new request -> BUSY -> done -> DEQUEUE
41  *                         /°\               |
42  *                          |                | more scatter entries
43  *                          \________________/
44  */
45 enum engine_status {
46 	ENGINE_IDLE,
47 	ENGINE_BUSY,
48 	ENGINE_W_DEQUEUE,
49 };
50 
51 /**
52  * struct req_progress - used for every crypt request
53  * @src_sg_it:		sg iterator for src
54  * @dst_sg_it:		sg iterator for dst
55  * @sg_src_left:	bytes left in src to process (scatter list)
56  * @src_start:		offset to add to src start position (scatter list)
57  * @crypt_len:		length of current hw crypt/hash process
58  * @hw_nbytes:		total bytes to process in hw for this request
59  * @copy_back:		whether to copy data back (crypt) or not (hash)
60  * @sg_dst_left:	bytes left in dst to process (scatter list)
61  * @dst_start:		offset to add to dst start position (scatter list)
62  * @hw_processed_bytes:	number of bytes processed by hw (request).
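 * @complete:		completion callback, invoked from the queue thread once
 *			hw_processed_bytes reaches hw_nbytes
 * @process:		programs the engine for the next chunk; the argument
 *			flags the first chunk of a request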
63  *
64  * sg helpers are used to iterate over the scatterlist. Since the size of the
65  * SRAM may be less than the scatter size, this struct is used to keep
66  * track of progress within the current scatterlist.
67  */
68 struct req_progress {
69 	struct sg_mapping_iter src_sg_it;
70 	struct sg_mapping_iter dst_sg_it;
71 	void (*complete) (void);
72 	void (*process) (int is_first);
73 
74 	/* src mostly */
75 	int sg_src_left;
76 	int src_start;
77 	int crypt_len;
78 	int hw_nbytes;
79 	/* dst mostly */
80 	int copy_back;
81 	int sg_dst_left;
82 	int dst_start;
83 	int hw_processed_bytes;
84 };
85 
86 struct crypto_priv {
87 	void __iomem *reg;
88 	void __iomem *sram;
89 	struct gen_pool *sram_pool;
90 	dma_addr_t sram_dma;
91 	int irq;
92 	struct clk *clk;
93 	struct task_struct *queue_th;
94 
95 	/* the lock protects queue and eng_st */
96 	spinlock_t lock;
97 	struct crypto_queue queue;
98 	enum engine_status eng_st;
99 	struct timer_list completion_timer;
100 	struct crypto_async_request *cur_req;
101 	struct req_progress p;
102 	int max_req_size;
103 	int sram_size;
104 	int has_sha1;
105 	int has_hmac_sha1;
106 };
107 
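/*
 * Single global engine instance: the driver handles only one CESA unit
 * (mv_probe() refuses a second device), so all state lives here instead of
 * being per-tfm.
 */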
108 static struct crypto_priv *cpg;
109 
110 struct mv_ctx {
111 	u8 aes_enc_key[AES_KEY_LEN];
112 	u32 aes_dec_key[8];
113 	int key_len;
114 	u32 need_calc_aes_dkey;
115 };
116 
117 enum crypto_op {
118 	COP_AES_ECB,
119 	COP_AES_CBC,
120 };
121 
122 struct mv_req_ctx {
123 	enum crypto_op op;
124 	int decrypt;
125 };
126 
127 enum hash_op {
128 	COP_SHA1,
129 	COP_HMAC_SHA1
130 };
131 
132 struct mv_tfm_hash_ctx {
133 	struct crypto_shash *fallback;
134 	struct crypto_shash *base_hash;
135 	u32 ivs[2 * SHA1_DIGEST_SIZE / 4];
136 	int count_add;
137 	enum hash_op op;
138 };
139 
140 struct mv_req_hash_ctx {
141 	u64 count;
142 	u32 state[SHA1_DIGEST_SIZE / 4];
143 	u8 buffer[SHA1_BLOCK_SIZE];
144 	int first_hash;		/* marks that we don't have previous state */
145 	int last_chunk;		/* marks that this is the 'final' request */
146 	int extra_bytes;	/* unprocessed bytes in buffer */
147 	enum hash_op op;
148 	int count_add;
149 };
150 
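/*
 * Watchdog for a stuck accelerator: mv_setup_timer() arms this before every
 * "GO", and crypto_int() cancels it on the completion interrupt.  If it ever
 * fires, force the engine off and let the queue thread clean up.
 */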
151 static void mv_completion_timer_callback(unsigned long unused)
152 {
153 	int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0;
154 
155 	printk(KERN_ERR MV_CESA
156 	       "completion timer expired (CESA %sactive), cleaning up.\n",
157 	       active ? "" : "in");
158 
159 	del_timer(&cpg->completion_timer);
160 	writel(SEC_CMD_DISABLE_SEC, cpg->reg + SEC_ACCEL_CMD);
161 	while (readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_DISABLE_SEC)
162 		printk(KERN_INFO MV_CESA "%s: waiting for engine to finish\n", __func__);
163 	cpg->eng_st = ENGINE_W_DEQUEUE;
164 	wake_up_process(cpg->queue_th);
165 }
166 
167 static void mv_setup_timer(void)
168 {
169 	setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0);
170 	mod_timer(&cpg->completion_timer,
171 			jiffies + msecs_to_jiffies(MV_CESA_EXPIRE));
172 }
173 
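/*
 * For decryption the engine is handed the tail of the expanded key schedule
 * (it ends up in SRAM_DATA_KEY_P), so derive it lazily from the encryption
 * key with the generic AES key expansion; the key_pos arithmetic selects the
 * last round keys for each key size.
 */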
174 static void compute_aes_dec_key(struct mv_ctx *ctx)
175 {
176 	struct crypto_aes_ctx gen_aes_key;
177 	int key_pos;
178 
179 	if (!ctx->need_calc_aes_dkey)
180 		return;
181 
182 	crypto_aes_expand_key(&gen_aes_key, ctx->aes_enc_key, ctx->key_len);
183 
184 	key_pos = ctx->key_len + 24;
185 	memcpy(ctx->aes_dec_key, &gen_aes_key.key_enc[key_pos], 4 * 4);
186 	switch (ctx->key_len) {
187 	case AES_KEYSIZE_256:
188 		key_pos -= 2;
189 		/* fall through */
190 	case AES_KEYSIZE_192:
191 		key_pos -= 2;
192 		memcpy(&ctx->aes_dec_key[4], &gen_aes_key.key_enc[key_pos],
193 				4 * 4);
194 		break;
195 	}
196 	ctx->need_calc_aes_dkey = 0;
197 }
198 
199 static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
200 		unsigned int len)
201 {
202 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
203 	struct mv_ctx *ctx = crypto_tfm_ctx(tfm);
204 
205 	switch (len) {
206 	case AES_KEYSIZE_128:
207 	case AES_KEYSIZE_192:
208 	case AES_KEYSIZE_256:
209 		break;
210 	default:
211 		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
212 		return -EINVAL;
213 	}
214 	ctx->key_len = len;
215 	ctx->need_calc_aes_dkey = 1;
216 
217 	memcpy(ctx->aes_enc_key, key, AES_KEY_LEN);
218 	return 0;
219 }
220 
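/*
 * Copy the next @len bytes of the source scatterlist into a linear buffer,
 * advancing the sg mapping iterator across entries as they are exhausted.
 */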
221 static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
222 {
223 	int ret;
224 	void *sbuf;
225 	int copy_len;
226 
227 	while (len) {
228 		if (!p->sg_src_left) {
229 			ret = sg_miter_next(&p->src_sg_it);
230 			BUG_ON(!ret);
231 			p->sg_src_left = p->src_sg_it.length;
232 			p->src_start = 0;
233 		}
234 
235 		sbuf = p->src_sg_it.addr + p->src_start;
236 
237 		copy_len = min(p->sg_src_left, len);
238 		memcpy(dbuf, sbuf, copy_len);
239 
240 		p->src_start += copy_len;
241 		p->sg_src_left -= copy_len;
242 
243 		len -= copy_len;
244 		dbuf += copy_len;
245 	}
246 }
247 
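/*
 * Stage as much of the remaining request as fits into the SRAM input area
 * (bounded by max_req_size), appending after any bytes already staged there
 * (p->crypt_len, e.g. buffered hash leftovers).
 */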
248 static void setup_data_in(void)
249 {
250 	struct req_progress *p = &cpg->p;
251 	int data_in_sram =
252 	    min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
253 	copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
254 			data_in_sram - p->crypt_len);
255 	p->crypt_len = data_in_sram;
256 }
257 
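/*
 * Build the accelerator descriptor for the next AES chunk directly in SRAM:
 * mode, direction, key and (for CBC, on the first chunk) the IV, then stage
 * the input data and start the engine.
 */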
258 static void mv_process_current_q(int first_block)
259 {
260 	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
261 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
262 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
263 	struct sec_accel_config op;
264 
265 	switch (req_ctx->op) {
266 	case COP_AES_ECB:
267 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
268 		break;
269 	case COP_AES_CBC:
270 	default:
271 		op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
272 		op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
273 			ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
274 		if (first_block)
275 			memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16);
276 		break;
277 	}
278 	if (req_ctx->decrypt) {
279 		op.config |= CFG_DIR_DEC;
280 		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_dec_key,
281 				AES_KEY_LEN);
282 	} else {
283 		op.config |= CFG_DIR_ENC;
284 		memcpy(cpg->sram + SRAM_DATA_KEY_P, ctx->aes_enc_key,
285 				AES_KEY_LEN);
286 	}
287 
288 	switch (ctx->key_len) {
289 	case AES_KEYSIZE_128:
290 		op.config |= CFG_AES_LEN_128;
291 		break;
292 	case AES_KEYSIZE_192:
293 		op.config |= CFG_AES_LEN_192;
294 		break;
295 	case AES_KEYSIZE_256:
296 		op.config |= CFG_AES_LEN_256;
297 		break;
298 	}
299 	op.enc_p = ENC_P_SRC(SRAM_DATA_IN_START) |
300 		ENC_P_DST(SRAM_DATA_OUT_START);
301 	op.enc_key_p = SRAM_DATA_KEY_P;
302 
303 	setup_data_in();
304 	op.enc_len = cpg->p.crypt_len;
305 	memcpy(cpg->sram + SRAM_CONFIG, &op,
306 			sizeof(struct sec_accel_config));
307 
308 	/* GO */
309 	mv_setup_timer();
310 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
311 }
312 
313 static void mv_crypto_algo_completion(void)
314 {
315 	struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
316 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
317 
318 	sg_miter_stop(&cpg->p.src_sg_it);
319 	sg_miter_stop(&cpg->p.dst_sg_it);
320 
321 	if (req_ctx->op != COP_AES_CBC)
322 		return ;
323 
324 	memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16);
325 }
326 
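/*
 * Program a SHA1/HMAC-SHA1 chunk.  The fragment mode (NOT/FIRST/MID/LAST)
 * depends on whether this is the first chunk of the request and whether the
 * hardware can also produce the final digest; when continuing a hash, the
 * saved intermediate state is reloaded into the digest registers.
 */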
327 static void mv_process_hash_current(int first_block)
328 {
329 	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
330 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
331 	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
332 	struct req_progress *p = &cpg->p;
333 	struct sec_accel_config op = { 0 };
334 	int is_last;
335 
336 	switch (req_ctx->op) {
337 	case COP_SHA1:
338 	default:
339 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_SHA1;
340 		break;
341 	case COP_HMAC_SHA1:
342 		op.config = CFG_OP_MAC_ONLY | CFG_MACM_HMAC_SHA1;
343 		memcpy(cpg->sram + SRAM_HMAC_IV_IN,
344 				tfm_ctx->ivs, sizeof(tfm_ctx->ivs));
345 		break;
346 	}
347 
348 	op.mac_src_p =
349 		MAC_SRC_DATA_P(SRAM_DATA_IN_START) | MAC_SRC_TOTAL_LEN((u32)
350 		req_ctx->
351 		count);
352 
353 	setup_data_in();
354 
355 	op.mac_digest =
356 		MAC_DIGEST_P(SRAM_DIGEST_BUF) | MAC_FRAG_LEN(p->crypt_len);
357 	op.mac_iv =
358 		MAC_INNER_IV_P(SRAM_HMAC_IV_IN) |
359 		MAC_OUTER_IV_P(SRAM_HMAC_IV_OUT);
360 
361 	is_last = req_ctx->last_chunk
362 		&& (p->hw_processed_bytes + p->crypt_len >= p->hw_nbytes)
363 		&& (req_ctx->count <= MAX_HW_HASH_SIZE);
364 	if (req_ctx->first_hash) {
365 		if (is_last)
366 			op.config |= CFG_NOT_FRAG;
367 		else
368 			op.config |= CFG_FIRST_FRAG;
369 
370 		req_ctx->first_hash = 0;
371 	} else {
372 		if (is_last)
373 			op.config |= CFG_LAST_FRAG;
374 		else
375 			op.config |= CFG_MID_FRAG;
376 
377 		if (first_block) {
378 			writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
379 			writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
380 			writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
381 			writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
382 			writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
383 		}
384 	}
385 
386 	memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
387 
388 	/* GO */
389 	mv_setup_timer();
390 	writel(SEC_CMD_EN_SEC_ACCL0, cpg->reg + SEC_ACCEL_CMD);
391 }
392 
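/*
 * Rebuild a sha1_state from the digest saved off the hardware plus the byte
 * count, so the software fallback can continue where the engine stopped.
 */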
393 static inline int mv_hash_import_sha1_ctx(const struct mv_req_hash_ctx *ctx,
394 					  struct shash_desc *desc)
395 {
396 	int i;
397 	struct sha1_state shash_state;
398 
399 	shash_state.count = ctx->count + ctx->count_add;
400 	for (i = 0; i < 5; i++)
401 		shash_state.state[i] = ctx->state[i];
402 	memcpy(shash_state.buffer, ctx->buffer, sizeof(shash_state.buffer));
403 	return crypto_shash_import(desc, &shash_state);
404 }
405 
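/*
 * Finish a hash in software when the hardware cannot: either nothing has been
 * hashed yet (short first chunk) or the total length exceeds MAX_HW_HASH_SIZE,
 * in which case the saved SHA1 state is imported into the fallback shash
 * before the final step.
 */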
406 static int mv_hash_final_fallback(struct ahash_request *req)
407 {
408 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
409 	struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req);
410 	SHASH_DESC_ON_STACK(shash, tfm_ctx->fallback);
411 	int rc;
412 
413 	shash->tfm = tfm_ctx->fallback;
414 	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
415 	if (unlikely(req_ctx->first_hash)) {
416 		crypto_shash_init(shash);
417 		crypto_shash_update(shash, req_ctx->buffer,
418 				    req_ctx->extra_bytes);
419 	} else {
420 		/* only SHA1 for now....
421 		 */
422 		rc = mv_hash_import_sha1_ctx(req_ctx, shash);
423 		if (rc)
424 			goto out;
425 	}
426 	rc = crypto_shash_final(shash, req->result);
427 out:
428 	return rc;
429 }
430 
431 static void mv_save_digest_state(struct mv_req_hash_ctx *ctx)
432 {
433 	ctx->state[0] = readl(cpg->reg + DIGEST_INITIAL_VAL_A);
434 	ctx->state[1] = readl(cpg->reg + DIGEST_INITIAL_VAL_B);
435 	ctx->state[2] = readl(cpg->reg + DIGEST_INITIAL_VAL_C);
436 	ctx->state[3] = readl(cpg->reg + DIGEST_INITIAL_VAL_D);
437 	ctx->state[4] = readl(cpg->reg + DIGEST_INITIAL_VAL_E);
438 }
439 
440 static void mv_hash_algo_completion(void)
441 {
442 	struct ahash_request *req = ahash_request_cast(cpg->cur_req);
443 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
444 
445 	if (ctx->extra_bytes)
446 		copy_src_to_buf(&cpg->p, ctx->buffer, ctx->extra_bytes);
447 	sg_miter_stop(&cpg->p.src_sg_it);
448 
449 	if (likely(ctx->last_chunk)) {
450 		if (likely(ctx->count <= MAX_HW_HASH_SIZE)) {
451 			memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF,
452 			       crypto_ahash_digestsize(crypto_ahash_reqtfm
453 						       (req)));
454 		} else {
455 			mv_save_digest_state(ctx);
456 			mv_hash_final_fallback(req);
457 		}
458 	} else {
459 		mv_save_digest_state(ctx);
460 	}
461 }
462 
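/*
 * Called from the queue thread after the engine reports completion of a
 * chunk: copy the output back to the destination scatterlist (ciphers only),
 * then either program the next chunk or complete the request.
 */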
463 static void dequeue_complete_req(void)
464 {
465 	struct crypto_async_request *req = cpg->cur_req;
466 	void *buf;
467 	int ret;
468 	cpg->p.hw_processed_bytes += cpg->p.crypt_len;
469 	if (cpg->p.copy_back) {
470 		int need_copy_len = cpg->p.crypt_len;
471 		int sram_offset = 0;
472 		do {
473 			int dst_copy;
474 
475 			if (!cpg->p.sg_dst_left) {
476 				ret = sg_miter_next(&cpg->p.dst_sg_it);
477 				BUG_ON(!ret);
478 				cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
479 				cpg->p.dst_start = 0;
480 			}
481 
482 			buf = cpg->p.dst_sg_it.addr;
483 			buf += cpg->p.dst_start;
484 
485 			dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
486 
487 			memcpy(buf,
488 			       cpg->sram + SRAM_DATA_OUT_START + sram_offset,
489 			       dst_copy);
490 			sram_offset += dst_copy;
491 			cpg->p.sg_dst_left -= dst_copy;
492 			need_copy_len -= dst_copy;
493 			cpg->p.dst_start += dst_copy;
494 		} while (need_copy_len > 0);
495 	}
496 
497 	cpg->p.crypt_len = 0;
498 
499 	BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
500 	if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
501 		/* process next scatter list entry */
502 		cpg->eng_st = ENGINE_BUSY;
503 		cpg->p.process(0);
504 	} else {
505 		cpg->p.complete();
506 		cpg->eng_st = ENGINE_IDLE;
507 		local_bh_disable();
508 		req->complete(req, 0);
509 		local_bh_enable();
510 	}
511 }
512 
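/*
 * Number of scatterlist entries needed to cover total_bytes, as required by
 * sg_miter_start().
 */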
513 static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
514 {
515 	int i = 0;
516 	size_t cur_len;
517 
518 	while (sl) {
519 		cur_len = sl[i].length;
520 		++i;
521 		if (total_bytes > cur_len)
522 			total_bytes -= cur_len;
523 		else
524 			break;
525 	}
526 
527 	return i;
528 }
529 
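/*
 * Start a new cipher request: set up sg iterators over src and dst, enable
 * copy-back of the output, and program the first chunk.
 */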
530 static void mv_start_new_crypt_req(struct ablkcipher_request *req)
531 {
532 	struct req_progress *p = &cpg->p;
533 	int num_sgs;
534 
535 	cpg->cur_req = &req->base;
536 	memset(p, 0, sizeof(struct req_progress));
537 	p->hw_nbytes = req->nbytes;
538 	p->complete = mv_crypto_algo_completion;
539 	p->process = mv_process_current_q;
540 	p->copy_back = 1;
541 
542 	num_sgs = count_sgs(req->src, req->nbytes);
543 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
544 
545 	num_sgs = count_sgs(req->dst, req->nbytes);
546 	sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
547 
548 	mv_process_current_q(1);
549 }
550 
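/*
 * Start a new hash request.  A trailing partial block is kept in ctx->buffer
 * unless the hardware can finalize the digest itself; if nothing is left for
 * the hardware at all, the data is only buffered and, for a final chunk, the
 * software fallback produces the digest.
 */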
551 static void mv_start_new_hash_req(struct ahash_request *req)
552 {
553 	struct req_progress *p = &cpg->p;
554 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
555 	int num_sgs, hw_bytes, old_extra_bytes, rc;
556 	cpg->cur_req = &req->base;
557 	memset(p, 0, sizeof(struct req_progress));
558 	hw_bytes = req->nbytes + ctx->extra_bytes;
559 	old_extra_bytes = ctx->extra_bytes;
560 
561 	ctx->extra_bytes = hw_bytes % SHA1_BLOCK_SIZE;
562 	if (ctx->extra_bytes != 0
563 	    && (!ctx->last_chunk || ctx->count > MAX_HW_HASH_SIZE))
564 		hw_bytes -= ctx->extra_bytes;
565 	else
566 		ctx->extra_bytes = 0;
567 
568 	num_sgs = count_sgs(req->src, req->nbytes);
569 	sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
570 
571 	if (hw_bytes) {
572 		p->hw_nbytes = hw_bytes;
573 		p->complete = mv_hash_algo_completion;
574 		p->process = mv_process_hash_current;
575 
576 		if (unlikely(old_extra_bytes)) {
577 			memcpy(cpg->sram + SRAM_DATA_IN_START, ctx->buffer,
578 			       old_extra_bytes);
579 			p->crypt_len = old_extra_bytes;
580 		}
581 
582 		mv_process_hash_current(1);
583 	} else {
584 		copy_src_to_buf(p, ctx->buffer + old_extra_bytes,
585 				ctx->extra_bytes - old_extra_bytes);
586 		sg_miter_stop(&p->src_sg_it);
587 		if (ctx->last_chunk)
588 			rc = mv_hash_final_fallback(req);
589 		else
590 			rc = 0;
591 		cpg->eng_st = ENGINE_IDLE;
592 		local_bh_disable();
593 		req->base.complete(&req->base, rc);
594 		local_bh_enable();
595 	}
596 }
597 
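/*
 * Engine kthread: dequeue finished work, then, while the engine is idle, pull
 * the next request off the crypto queue and dispatch it as either an
 * ablkcipher or an ahash request.
 */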
598 static int queue_manag(void *data)
599 {
600 	cpg->eng_st = ENGINE_IDLE;
601 	do {
602 		struct crypto_async_request *async_req = NULL;
603 		struct crypto_async_request *backlog = NULL;
604 
605 		__set_current_state(TASK_INTERRUPTIBLE);
606 
607 		if (cpg->eng_st == ENGINE_W_DEQUEUE)
608 			dequeue_complete_req();
609 
610 		spin_lock_irq(&cpg->lock);
611 		if (cpg->eng_st == ENGINE_IDLE) {
612 			backlog = crypto_get_backlog(&cpg->queue);
613 			async_req = crypto_dequeue_request(&cpg->queue);
614 			if (async_req) {
615 				BUG_ON(cpg->eng_st != ENGINE_IDLE);
616 				cpg->eng_st = ENGINE_BUSY;
617 			}
618 		}
619 		spin_unlock_irq(&cpg->lock);
620 
621 		if (backlog) {
622 			backlog->complete(backlog, -EINPROGRESS);
623 			backlog = NULL;
624 		}
625 
626 		if (async_req) {
627 			if (crypto_tfm_alg_type(async_req->tfm) !=
628 			    CRYPTO_ALG_TYPE_AHASH) {
629 				struct ablkcipher_request *req =
630 				    ablkcipher_request_cast(async_req);
631 				mv_start_new_crypt_req(req);
632 			} else {
633 				struct ahash_request *req =
634 				    ahash_request_cast(async_req);
635 				mv_start_new_hash_req(req);
636 			}
637 			async_req = NULL;
638 		}
639 
640 		schedule();
641 
642 	} while (!kthread_should_stop());
643 	return 0;
644 }
645 
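/*
 * Enqueue an async request and kick the queue thread; the return value is
 * whatever crypto_enqueue_request() reports (normally -EINPROGRESS).
 */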
646 static int mv_handle_req(struct crypto_async_request *req)
647 {
648 	unsigned long flags;
649 	int ret;
650 
651 	spin_lock_irqsave(&cpg->lock, flags);
652 	ret = crypto_enqueue_request(&cpg->queue, req);
653 	spin_unlock_irqrestore(&cpg->lock, flags);
654 	wake_up_process(cpg->queue_th);
655 	return ret;
656 }
657 
658 static int mv_enc_aes_ecb(struct ablkcipher_request *req)
659 {
660 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
661 
662 	req_ctx->op = COP_AES_ECB;
663 	req_ctx->decrypt = 0;
664 
665 	return mv_handle_req(&req->base);
666 }
667 
668 static int mv_dec_aes_ecb(struct ablkcipher_request *req)
669 {
670 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
671 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
672 
673 	req_ctx->op = COP_AES_ECB;
674 	req_ctx->decrypt = 1;
675 
676 	compute_aes_dec_key(ctx);
677 	return mv_handle_req(&req->base);
678 }
679 
680 static int mv_enc_aes_cbc(struct ablkcipher_request *req)
681 {
682 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
683 
684 	req_ctx->op = COP_AES_CBC;
685 	req_ctx->decrypt = 0;
686 
687 	return mv_handle_req(&req->base);
688 }
689 
690 static int mv_dec_aes_cbc(struct ablkcipher_request *req)
691 {
692 	struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
693 	struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
694 
695 	req_ctx->op = COP_AES_CBC;
696 	req_ctx->decrypt = 1;
697 
698 	compute_aes_dec_key(ctx);
699 	return mv_handle_req(&req->base);
700 }
701 
702 static int mv_cra_init(struct crypto_tfm *tfm)
703 {
704 	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_req_ctx);
705 	return 0;
706 }
707 
708 static void mv_init_hash_req_ctx(struct mv_req_hash_ctx *ctx, int op,
709 				 int is_last, unsigned int req_len,
710 				 int count_add)
711 {
712 	memset(ctx, 0, sizeof(*ctx));
713 	ctx->op = op;
714 	ctx->count = req_len;
715 	ctx->first_hash = 1;
716 	ctx->last_chunk = is_last;
717 	ctx->count_add = count_add;
718 }
719 
720 static void mv_update_hash_req_ctx(struct mv_req_hash_ctx *ctx, int is_last,
721 				   unsigned req_len)
722 {
723 	ctx->last_chunk = is_last;
724 	ctx->count += req_len;
725 }
726 
727 static int mv_hash_init(struct ahash_request *req)
728 {
729 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
730 	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0,
731 			     tfm_ctx->count_add);
732 	return 0;
733 }
734 
735 static int mv_hash_update(struct ahash_request *req)
736 {
737 	if (!req->nbytes)
738 		return 0;
739 
740 	mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes);
741 	return mv_handle_req(&req->base);
742 }
743 
744 static int mv_hash_final(struct ahash_request *req)
745 {
746 	struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
747 
748 	ahash_request_set_crypt(req, NULL, req->result, 0);
749 	mv_update_hash_req_ctx(ctx, 1, 0);
750 	return mv_handle_req(&req->base);
751 }
752 
753 static int mv_hash_finup(struct ahash_request *req)
754 {
755 	mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes);
756 	return mv_handle_req(&req->base);
757 }
758 
759 static int mv_hash_digest(struct ahash_request *req)
760 {
761 	const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
762 	mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1,
763 			     req->nbytes, tfm_ctx->count_add);
764 	return mv_handle_req(&req->base);
765 }
766 
767 static void mv_hash_init_ivs(struct mv_tfm_hash_ctx *ctx, const void *istate,
768 			     const void *ostate)
769 {
770 	const struct sha1_state *isha1_state = istate, *osha1_state = ostate;
771 	int i;
772 	for (i = 0; i < 5; i++) {
773 		ctx->ivs[i] = cpu_to_be32(isha1_state->state[i]);
774 		ctx->ivs[i + 5] = cpu_to_be32(osha1_state->state[i]);
775 	}
776 }
777 
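/*
 * HMAC key processing, mirroring the generic hmac template: hash keys longer
 * than the block size, XOR with the 0x36/0x5c pads, and export the partial
 * inner/outer hash states as the IV pair the hardware loads from SRAM.
 */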
778 static int mv_hash_setkey(struct crypto_ahash *tfm, const u8 *key,
779 			  unsigned int keylen)
780 {
781 	int rc;
782 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(&tfm->base);
783 	int bs, ds, ss;
784 
785 	if (!ctx->base_hash)
786 		return 0;
787 
788 	rc = crypto_shash_setkey(ctx->fallback, key, keylen);
789 	if (rc)
790 		return rc;
791 
792 	/* Can't see a way to extract the ipad/opad from the fallback tfm
793 	   so I'm basically copying code from the hmac module */
794 	bs = crypto_shash_blocksize(ctx->base_hash);
795 	ds = crypto_shash_digestsize(ctx->base_hash);
796 	ss = crypto_shash_statesize(ctx->base_hash);
797 
798 	{
799 		SHASH_DESC_ON_STACK(shash, ctx->base_hash);
800 
801 		unsigned int i;
802 		char ipad[ss];
803 		char opad[ss];
804 
805 		shash->tfm = ctx->base_hash;
806 		shash->flags = crypto_shash_get_flags(ctx->base_hash) &
807 		    CRYPTO_TFM_REQ_MAY_SLEEP;
808 
809 		if (keylen > bs) {
810 			int err;
811 
812 			err =
813 			    crypto_shash_digest(shash, key, keylen, ipad);
814 			if (err)
815 				return err;
816 
817 			keylen = ds;
818 		} else
819 			memcpy(ipad, key, keylen);
820 
821 		memset(ipad + keylen, 0, bs - keylen);
822 		memcpy(opad, ipad, bs);
823 
824 		for (i = 0; i < bs; i++) {
825 			ipad[i] ^= 0x36;
826 			opad[i] ^= 0x5c;
827 		}
828 
829 		rc = crypto_shash_init(shash) ? :
830 		    crypto_shash_update(shash, ipad, bs) ? :
831 		    crypto_shash_export(shash, ipad) ? :
832 		    crypto_shash_init(shash) ? :
833 		    crypto_shash_update(shash, opad, bs) ? :
834 		    crypto_shash_export(shash, opad);
835 
836 		if (rc == 0)
837 			mv_hash_init_ivs(ctx, ipad, opad);
838 
839 		return rc;
840 	}
841 }
842 
843 static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name,
844 			    enum hash_op op, int count_add)
845 {
846 	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
847 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
848 	struct crypto_shash *fallback_tfm = NULL;
849 	struct crypto_shash *base_hash = NULL;
850 	int err = -ENOMEM;
851 
852 	ctx->op = op;
853 	ctx->count_add = count_add;
854 
855 	/* Allocate a fallback and abort if it failed. */
856 	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
857 					  CRYPTO_ALG_NEED_FALLBACK);
858 	if (IS_ERR(fallback_tfm)) {
859 		printk(KERN_WARNING MV_CESA
860 		       "Fallback driver '%s' could not be loaded!\n",
861 		       fallback_driver_name);
862 		err = PTR_ERR(fallback_tfm);
863 		goto out;
864 	}
865 	ctx->fallback = fallback_tfm;
866 
867 	if (base_hash_name) {
868 		/* Allocate a hash to compute the ipad/opad of hmac. */
869 		base_hash = crypto_alloc_shash(base_hash_name, 0,
870 					       CRYPTO_ALG_NEED_FALLBACK);
871 		if (IS_ERR(base_hash)) {
872 			printk(KERN_WARNING MV_CESA
873 			       "Base driver '%s' could not be loaded!\n",
874 			       base_hash_name);
875 			err = PTR_ERR(base_hash);
876 			goto err_bad_base;
877 		}
878 	}
879 	ctx->base_hash = base_hash;
880 
881 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
882 				 sizeof(struct mv_req_hash_ctx) +
883 				 crypto_shash_descsize(ctx->fallback));
884 	return 0;
885 err_bad_base:
886 	crypto_free_shash(fallback_tfm);
887 out:
888 	return err;
889 }
890 
891 static void mv_cra_hash_exit(struct crypto_tfm *tfm)
892 {
893 	struct mv_tfm_hash_ctx *ctx = crypto_tfm_ctx(tfm);
894 
895 	crypto_free_shash(ctx->fallback);
896 	if (ctx->base_hash)
897 		crypto_free_shash(ctx->base_hash);
898 }
899 
900 static int mv_cra_hash_sha1_init(struct crypto_tfm *tfm)
901 {
902 	return mv_cra_hash_init(tfm, NULL, COP_SHA1, 0);
903 }
904 
905 static int mv_cra_hash_hmac_sha1_init(struct crypto_tfm *tfm)
906 {
907 	return mv_cra_hash_init(tfm, "sha1", COP_HMAC_SHA1, SHA1_BLOCK_SIZE);
908 }
909 
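/*
 * Completion interrupt: acknowledge ACCEL0_DONE, cancel the watchdog timer
 * and wake the queue thread to dequeue the finished chunk.
 */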
910 static irqreturn_t crypto_int(int irq, void *priv)
911 {
912 	u32 val;
913 
914 	val = readl(cpg->reg + SEC_ACCEL_INT_STATUS);
915 	if (!(val & SEC_INT_ACCEL0_DONE))
916 		return IRQ_NONE;
917 
918 	if (!del_timer(&cpg->completion_timer)) {
919 		printk(KERN_WARNING MV_CESA
920 		       "got an interrupt but no pending timer?\n");
921 	}
922 	val &= ~SEC_INT_ACCEL0_DONE;
923 	writel(val, cpg->reg + FPGA_INT_STATUS);
924 	writel(val, cpg->reg + SEC_ACCEL_INT_STATUS);
925 	BUG_ON(cpg->eng_st != ENGINE_BUSY);
926 	cpg->eng_st = ENGINE_W_DEQUEUE;
927 	wake_up_process(cpg->queue_th);
928 	return IRQ_HANDLED;
929 }
930 
931 static struct crypto_alg mv_aes_alg_ecb = {
932 	.cra_name		= "ecb(aes)",
933 	.cra_driver_name	= "mv-ecb-aes",
934 	.cra_priority	= 300,
935 	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
936 			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
937 	.cra_blocksize	= 16,
938 	.cra_ctxsize	= sizeof(struct mv_ctx),
939 	.cra_alignmask	= 0,
940 	.cra_type	= &crypto_ablkcipher_type,
941 	.cra_module	= THIS_MODULE,
942 	.cra_init	= mv_cra_init,
943 	.cra_u		= {
944 		.ablkcipher = {
945 			.min_keysize	=	AES_MIN_KEY_SIZE,
946 			.max_keysize	=	AES_MAX_KEY_SIZE,
947 			.setkey		=	mv_setkey_aes,
948 			.encrypt	=	mv_enc_aes_ecb,
949 			.decrypt	=	mv_dec_aes_ecb,
950 		},
951 	},
952 };
953 
954 static struct crypto_alg mv_aes_alg_cbc = {
955 	.cra_name		= "cbc(aes)",
956 	.cra_driver_name	= "mv-cbc-aes",
957 	.cra_priority	= 300,
958 	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
959 			  CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
960 	.cra_blocksize	= AES_BLOCK_SIZE,
961 	.cra_ctxsize	= sizeof(struct mv_ctx),
962 	.cra_alignmask	= 0,
963 	.cra_type	= &crypto_ablkcipher_type,
964 	.cra_module	= THIS_MODULE,
965 	.cra_init	= mv_cra_init,
966 	.cra_u		= {
967 		.ablkcipher = {
968 			.ivsize		=	AES_BLOCK_SIZE,
969 			.min_keysize	=	AES_MIN_KEY_SIZE,
970 			.max_keysize	=	AES_MAX_KEY_SIZE,
971 			.setkey		=	mv_setkey_aes,
972 			.encrypt	=	mv_enc_aes_cbc,
973 			.decrypt	=	mv_dec_aes_cbc,
974 		},
975 	},
976 };
977 
978 static struct ahash_alg mv_sha1_alg = {
979 	.init = mv_hash_init,
980 	.update = mv_hash_update,
981 	.final = mv_hash_final,
982 	.finup = mv_hash_finup,
983 	.digest = mv_hash_digest,
984 	.halg = {
985 		 .digestsize = SHA1_DIGEST_SIZE,
986 		 .base = {
987 			  .cra_name = "sha1",
988 			  .cra_driver_name = "mv-sha1",
989 			  .cra_priority = 300,
990 			  .cra_flags =
991 			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
992 			  CRYPTO_ALG_NEED_FALLBACK,
993 			  .cra_blocksize = SHA1_BLOCK_SIZE,
994 			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
995 			  .cra_init = mv_cra_hash_sha1_init,
996 			  .cra_exit = mv_cra_hash_exit,
997 			  .cra_module = THIS_MODULE,
998 			  }
999 		 }
1000 };
1001 
1002 static struct ahash_alg mv_hmac_sha1_alg = {
1003 	.init = mv_hash_init,
1004 	.update = mv_hash_update,
1005 	.final = mv_hash_final,
1006 	.finup = mv_hash_finup,
1007 	.digest = mv_hash_digest,
1008 	.setkey = mv_hash_setkey,
1009 	.halg = {
1010 		 .digestsize = SHA1_DIGEST_SIZE,
1011 		 .base = {
1012 			  .cra_name = "hmac(sha1)",
1013 			  .cra_driver_name = "mv-hmac-sha1",
1014 			  .cra_priority = 300,
1015 			  .cra_flags =
1016 			  CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
1017 			  CRYPTO_ALG_NEED_FALLBACK,
1018 			  .cra_blocksize = SHA1_BLOCK_SIZE,
1019 			  .cra_ctxsize = sizeof(struct mv_tfm_hash_ctx),
1020 			  .cra_init = mv_cra_hash_hmac_sha1_init,
1021 			  .cra_exit = mv_cra_hash_exit,
1022 			  .cra_module = THIS_MODULE,
1023 			  }
1024 		 }
1025 };
1026 
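/*
 * Obtain the crypto SRAM either from a "marvell,crypto-srams" gen_pool (DT)
 * or from the "sram" platform MMIO resource; the size defaults to
 * MV_CESA_DEFAULT_SRAM_SIZE unless "marvell,crypto-sram-size" overrides it.
 */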
1027 static int mv_cesa_get_sram(struct platform_device *pdev,
1028 			    struct crypto_priv *cp)
1029 {
1030 	struct resource *res;
1031 	u32 sram_size = MV_CESA_DEFAULT_SRAM_SIZE;
1032 
1033 	of_property_read_u32(pdev->dev.of_node, "marvell,crypto-sram-size",
1034 			     &sram_size);
1035 
1036 	cp->sram_size = sram_size;
1037 	cp->sram_pool = of_gen_pool_get(pdev->dev.of_node,
1038 					"marvell,crypto-srams", 0);
1039 	if (cp->sram_pool) {
1040 		cp->sram = gen_pool_dma_alloc(cp->sram_pool, sram_size,
1041 					      &cp->sram_dma);
1042 		if (cp->sram)
1043 			return 0;
1044 
1045 		return -ENOMEM;
1046 	}
1047 
1048 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1049 					   "sram");
1050 	if (!res || resource_size(res) < cp->sram_size)
1051 		return -EINVAL;
1052 
1053 	cp->sram = devm_ioremap_resource(&pdev->dev, res);
1054 	if (IS_ERR(cp->sram))
1055 		return PTR_ERR(cp->sram);
1056 
1057 	return 0;
1058 }
1059 
1060 static int mv_probe(struct platform_device *pdev)
1061 {
1062 	struct crypto_priv *cp;
1063 	struct resource *res;
1064 	int irq;
1065 	int ret;
1066 
1067 	if (cpg) {
1068 		printk(KERN_ERR MV_CESA "Second crypto dev?\n");
1069 		return -EEXIST;
1070 	}
1071 
1072 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1073 	if (!res)
1074 		return -ENXIO;
1075 
1076 	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
1077 	if (!cp)
1078 		return -ENOMEM;
1079 
1080 	spin_lock_init(&cp->lock);
1081 	crypto_init_queue(&cp->queue, 50);
1082 	cp->reg = devm_ioremap_resource(&pdev->dev, res);
1083 	if (IS_ERR(cp->reg)) {
1084 		ret = PTR_ERR(cp->reg);
1085 		goto err;
1086 	}
1087 
1088 	ret = mv_cesa_get_sram(pdev, cp);
1089 	if (ret)
1090 		goto err;
1091 
1092 	cp->max_req_size = cp->sram_size - SRAM_CFG_SPACE;
1093 
1094 	irq = platform_get_irq(pdev, 0);
1095 	if (irq < 0) {
1096 		ret = irq;
1097 		goto err;
1098 	}
1099 	cp->irq = irq;
1100 
1101 	platform_set_drvdata(pdev, cp);
1102 	cpg = cp;
1103 
1104 	cp->queue_th = kthread_run(queue_manag, cp, "mv_crypto");
1105 	if (IS_ERR(cp->queue_th)) {
1106 		ret = PTR_ERR(cp->queue_th);
1107 		goto err;
1108 	}
1109 
1110 	ret = request_irq(irq, crypto_int, 0, dev_name(&pdev->dev),
1111 			cp);
1112 	if (ret)
1113 		goto err_thread;
1114 
1115 	/* Not all platforms can gate the clock, so it is not
1116 	   an error if the clock does not exist. */
1117 	cp->clk = clk_get(&pdev->dev, NULL);
1118 	if (!IS_ERR(cp->clk))
1119 		clk_prepare_enable(cp->clk);
1120 
1121 	writel(0, cpg->reg + SEC_ACCEL_INT_STATUS);
1122 	writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
1123 	writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
1124 	writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
1125 
1126 	ret = crypto_register_alg(&mv_aes_alg_ecb);
1127 	if (ret) {
1128 		printk(KERN_WARNING MV_CESA
1129 		       "Could not register aes-ecb driver\n");
1130 		goto err_irq;
1131 	}
1132 
1133 	ret = crypto_register_alg(&mv_aes_alg_cbc);
1134 	if (ret) {
1135 		printk(KERN_WARNING MV_CESA
1136 		       "Could not register aes-cbc driver\n");
1137 		goto err_unreg_ecb;
1138 	}
1139 
1140 	ret = crypto_register_ahash(&mv_sha1_alg);
1141 	if (ret == 0)
1142 		cpg->has_sha1 = 1;
1143 	else
1144 		printk(KERN_WARNING MV_CESA "Could not register sha1 driver\n");
1145 
1146 	ret = crypto_register_ahash(&mv_hmac_sha1_alg);
1147 	if (ret == 0) {
1148 		cpg->has_hmac_sha1 = 1;
1149 	} else {
1150 		printk(KERN_WARNING MV_CESA
1151 		       "Could not register hmac-sha1 driver\n");
1152 	}
1153 
1154 	return 0;
1155 err_unreg_ecb:
1156 	crypto_unregister_alg(&mv_aes_alg_ecb);
1157 err_irq:
1158 	free_irq(irq, cp);
1159 	if (!IS_ERR(cp->clk)) {
1160 		clk_disable_unprepare(cp->clk);
1161 		clk_put(cp->clk);
1162 	}
1163 err_thread:
1164 	kthread_stop(cp->queue_th);
1165 err:
1166 	kfree(cp);
1167 	cpg = NULL;
1168 	return ret;
1169 }
1170 
1171 static int mv_remove(struct platform_device *pdev)
1172 {
1173 	struct crypto_priv *cp = platform_get_drvdata(pdev);
1174 
1175 	crypto_unregister_alg(&mv_aes_alg_ecb);
1176 	crypto_unregister_alg(&mv_aes_alg_cbc);
1177 	if (cp->has_sha1)
1178 		crypto_unregister_ahash(&mv_sha1_alg);
1179 	if (cp->has_hmac_sha1)
1180 		crypto_unregister_ahash(&mv_hmac_sha1_alg);
1181 	kthread_stop(cp->queue_th);
1182 	free_irq(cp->irq, cp);
1183 	memset(cp->sram, 0, cp->sram_size);
1184 
1185 	if (!IS_ERR(cp->clk)) {
1186 		clk_disable_unprepare(cp->clk);
1187 		clk_put(cp->clk);
1188 	}
1189 
1190 	kfree(cp);
1191 	cpg = NULL;
1192 	return 0;
1193 }
1194 
1195 static const struct of_device_id mv_cesa_of_match_table[] = {
1196 	{ .compatible = "marvell,orion-crypto", },
1197 	{ .compatible = "marvell,kirkwood-crypto", },
1198 	{ .compatible = "marvell,dove-crypto", },
1199 	{}
1200 };
1201 MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table);
1202 
1203 static struct platform_driver marvell_crypto = {
1204 	.probe		= mv_probe,
1205 	.remove		= mv_remove,
1206 	.driver		= {
1207 		.name	= "mv_crypto",
1208 		.of_match_table = mv_cesa_of_match_table,
1209 	},
1210 };
1211 MODULE_ALIAS("platform:mv_crypto");
1212 
1213 module_platform_driver(marvell_crypto);
1214 
1215 MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
1216 MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
1217 MODULE_LICENSE("GPL");
1218