/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysize in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"

/*
 * sun4i_ss_opti_poll() - optimized PIO cipher path for word-aligned SGs.
 *
 * Drives one skcipher request through the Security System by polling:
 * key, IV and mode are programmed into the SS registers, then data is
 * pushed into the RX FIFO and drained from the TX FIFO, 32-bit words at
 * a time, until the whole request has been processed.
 *
 * Only reached from sun4i_ss_cipher_poll() when every src/dst
 * scatterlist entry has a length multiple of 4, so all FIFO transfers
 * can be whole words and no linearization buffer is needed.
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	/* A zero-length request succeeds trivially. */
	if (!areq->cryptlen)
		return 0;

	/*
	 * NOTE(review): rejecting a NULL IV here makes the later
	 * "if (areq->iv)" tests always true; confirm that ECB requests
	 * (ivsize == 0, iv typically NULL) cannot reach this function.
	 */
	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/* The SS is a single shared engine: serialize all register access. */
	spin_lock_irqsave(&ss->slock, flags);

	/* Program the key, one 32-bit word per SS_KEYx register slot. */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Program the IV (at most 4 words) into the SS_IVx registers. */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* mode carries SS_ENABLED (set by the callers), so this starts the engine. */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* From here on ileft/oleft count 32-bit words, not bytes. */
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		/* Push as many words as FIFO space and the current SG allow. */
		todo = min(rx_cnt, ileft);
		todo = min_t(size_t, todo, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		/* Current input SG entry exhausted: advance to the next one. */
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		/* Refresh the FIFO occupancy counters from the status register. */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		/* Drain as many processed words as the TX FIFO holds. */
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		/* Current output SG entry full: advance to the next one. */
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);

	/* Copy the IV registers back into the request (updated by the engine). */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Disable the engine before releasing the lock. */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
127 
/* Generic function that supports SGs with size not multiple of 4 */
/*
 * sun4i_ss_cipher_poll() - PIO cipher path for arbitrary scatterlists.
 *
 * Entry point used by all the mode wrappers below. If every src/dst SG
 * entry length is a multiple of 4 it delegates to sun4i_ss_opti_poll();
 * otherwise it performs the same polled FIFO dance but linearizes
 * partial words through two small stack buffers (buf for input, bufo
 * for output) so the 32-bit-only FIFOs can still be used.
 *
 * Returns 0 on success or a negative errno.
 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo*/
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;

	/* A zero-length request succeeds trivially. */
	if (!areq->cryptlen)
		return 0;

	/*
	 * NOTE(review): rejecting a NULL IV here makes the later
	 * "if (areq->iv)" tests always true; confirm that ECB requests
	 * (ivsize == 0, iv typically NULL) cannot reach this function.
	 */
	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	/* The SS is a single shared engine: serialize all register access. */
	spin_lock_irqsave(&ss->slock, flags);

	/* Program the key, one 32-bit word per SS_KEYx register slot. */
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	/* Program the IV (at most 4 words) into the SS_IVx registers. */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	/* mode carries SS_ENABLED (set by the callers), so this starts the engine. */
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	/* Here ileft/oleft count bytes (unlike the optimized path). */
	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			/*
			 * todo is the number of consecutive 4byte word that we
			 * can read from current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				/* Aligned fast path: stream whole words from the SG. */
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * not enough consecutive bytes, so we need to
				 * linearize in buf. todo is in bytes
				 * After that copy, if we have a multiple of 4
				 * we need to be able to write all buf in one
				 * pass, so it is why we min() with rx_cnt
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				/* Flush buf to the FIFO once it holds whole words. */
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			/* Current input SG entry exhausted: advance. */
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		/* Refresh the FIFO occupancy counters from the status register. */
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev,
			"%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		/* Nothing produced yet: keep feeding/polling. */
		if (!tx_cnt)
			continue;
		/* todo in 4bytes word */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			/* Aligned fast path: stream whole words into the SG. */
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * read obl bytes in bufo, we read at maximum for
			 * emptying the device
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * how many bytes we can copy ?
				 * no more than remaining SG size
				 * no more than remaining buffer
				 * no need to test against oleft
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	/* Copy the IV registers back into the request (updated by the engine). */
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	/* Disable the engine before releasing the lock. */
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
324 
325 /* CBC AES */
sun4i_ss_cbc_aes_encrypt(struct skcipher_request * areq)326 int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
327 {
328 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
329 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
330 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
331 
332 	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
333 		op->keymode;
334 	return sun4i_ss_cipher_poll(areq);
335 }
336 
sun4i_ss_cbc_aes_decrypt(struct skcipher_request * areq)337 int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
338 {
339 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
340 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
341 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
342 
343 	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
344 		op->keymode;
345 	return sun4i_ss_cipher_poll(areq);
346 }
347 
348 /* ECB AES */
sun4i_ss_ecb_aes_encrypt(struct skcipher_request * areq)349 int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
350 {
351 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
352 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
353 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
354 
355 	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
356 		op->keymode;
357 	return sun4i_ss_cipher_poll(areq);
358 }
359 
sun4i_ss_ecb_aes_decrypt(struct skcipher_request * areq)360 int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
361 {
362 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
363 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
364 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
365 
366 	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
367 		op->keymode;
368 	return sun4i_ss_cipher_poll(areq);
369 }
370 
371 /* CBC DES */
sun4i_ss_cbc_des_encrypt(struct skcipher_request * areq)372 int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
373 {
374 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
375 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
376 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
377 
378 	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
379 		op->keymode;
380 	return sun4i_ss_cipher_poll(areq);
381 }
382 
sun4i_ss_cbc_des_decrypt(struct skcipher_request * areq)383 int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
384 {
385 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
386 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
387 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
388 
389 	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
390 		op->keymode;
391 	return sun4i_ss_cipher_poll(areq);
392 }
393 
394 /* ECB DES */
sun4i_ss_ecb_des_encrypt(struct skcipher_request * areq)395 int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
396 {
397 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
398 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
399 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
400 
401 	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
402 		op->keymode;
403 	return sun4i_ss_cipher_poll(areq);
404 }
405 
sun4i_ss_ecb_des_decrypt(struct skcipher_request * areq)406 int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
407 {
408 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
409 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
410 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
411 
412 	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
413 		op->keymode;
414 	return sun4i_ss_cipher_poll(areq);
415 }
416 
417 /* CBC 3DES */
sun4i_ss_cbc_des3_encrypt(struct skcipher_request * areq)418 int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
419 {
420 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
421 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
422 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
423 
424 	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
425 		op->keymode;
426 	return sun4i_ss_cipher_poll(areq);
427 }
428 
sun4i_ss_cbc_des3_decrypt(struct skcipher_request * areq)429 int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
430 {
431 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
432 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
433 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
434 
435 	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
436 		op->keymode;
437 	return sun4i_ss_cipher_poll(areq);
438 }
439 
440 /* ECB 3DES */
sun4i_ss_ecb_des3_encrypt(struct skcipher_request * areq)441 int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
442 {
443 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
444 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
445 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
446 
447 	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
448 		op->keymode;
449 	return sun4i_ss_cipher_poll(areq);
450 }
451 
sun4i_ss_ecb_des3_decrypt(struct skcipher_request * areq)452 int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
453 {
454 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
455 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
456 	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
457 
458 	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
459 		op->keymode;
460 	return sun4i_ss_cipher_poll(areq);
461 }
462 
sun4i_ss_cipher_init(struct crypto_tfm * tfm)463 int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
464 {
465 	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
466 	struct sun4i_ss_alg_template *algt;
467 
468 	memset(op, 0, sizeof(struct sun4i_tfm_ctx));
469 
470 	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
471 			    alg.crypto.base);
472 	op->ss = algt->ss;
473 
474 	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
475 				    sizeof(struct sun4i_cipher_req_ctx));
476 
477 	return 0;
478 }
479 
480 /* check and set the AES key, prepare the mode to be used */
sun4i_ss_aes_setkey(struct crypto_skcipher * tfm,const u8 * key,unsigned int keylen)481 int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
482 			unsigned int keylen)
483 {
484 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
485 	struct sun4i_ss_ctx *ss = op->ss;
486 
487 	switch (keylen) {
488 	case 128 / 8:
489 		op->keymode = SS_AES_128BITS;
490 		break;
491 	case 192 / 8:
492 		op->keymode = SS_AES_192BITS;
493 		break;
494 	case 256 / 8:
495 		op->keymode = SS_AES_256BITS;
496 		break;
497 	default:
498 		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
499 		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
500 		return -EINVAL;
501 	}
502 	op->keylen = keylen;
503 	memcpy(op->key, key, keylen);
504 	return 0;
505 }
506 
507 /* check and set the DES key, prepare the mode to be used */
sun4i_ss_des_setkey(struct crypto_skcipher * tfm,const u8 * key,unsigned int keylen)508 int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
509 			unsigned int keylen)
510 {
511 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
512 	struct sun4i_ss_ctx *ss = op->ss;
513 	u32 flags;
514 	u32 tmp[DES_EXPKEY_WORDS];
515 	int ret;
516 
517 	if (unlikely(keylen != DES_KEY_SIZE)) {
518 		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
519 		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
520 		return -EINVAL;
521 	}
522 
523 	flags = crypto_skcipher_get_flags(tfm);
524 
525 	ret = des_ekey(tmp, key);
526 	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
527 		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
528 		dev_dbg(ss->dev, "Weak key %u\n", keylen);
529 		return -EINVAL;
530 	}
531 
532 	op->keylen = keylen;
533 	memcpy(op->key, key, keylen);
534 	return 0;
535 }
536 
537 /* check and set the 3DES key, prepare the mode to be used */
sun4i_ss_des3_setkey(struct crypto_skcipher * tfm,const u8 * key,unsigned int keylen)538 int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
539 			 unsigned int keylen)
540 {
541 	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
542 	struct sun4i_ss_ctx *ss = op->ss;
543 
544 	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
545 		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
546 		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
547 		return -EINVAL;
548 	}
549 	op->keylen = keylen;
550 	memcpy(op->key, key, keylen);
551 	return 0;
552 }
553