/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"

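/*
 * Optimized PIO path, used when every source and destination SG entry has
 * a length that is a multiple of 4 bytes: data is pushed to SS_RXFIFO and
 * pulled from SS_TXFIFO word by word, without any intermediate buffer.
 */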
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

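		/* poll the FIFO status to see how many words can be written/read */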
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft > 0);

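	/* write the (possibly updated) IV back to the request */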
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

/* Generic function that supports SGs whose sizes are not a multiple of 4 */
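/*
 * Data from SG entries that are not 4-byte aligned is staged through the
 * stack buffers buf (input) and bufo (output), so that the RX and TX FIFOs
 * are still accessed in whole 32-bit words.
 */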
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	char buf[4 * SS_RX_MAX];	/* buffer for linearizing the SG src */
	char bufo[4 * SS_TX_MAX];	/* buffer for linearizing the SG dst */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If all SGs have a size that is a multiple of 4,
	 * we can use the optimized SS function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->info + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}
	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;

	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf. todo is in bytes.
				 * After the copy, if we have a multiple of 4,
				 * we must be able to write all of buf in one
				 * pass, which is why we min() with rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt,
			todo, ob);
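		/* nothing to read back yet, loop to keep feeding the RX FIFO */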
		if (tx_cnt == 0)
			continue;
		/* todo is in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as many words as are
			 * available in order to drain the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size,
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->info) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->info + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

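/*
 * The handlers below only build the SS control word (algorithm, block mode,
 * direction and keymode) in the request context, then delegate the actual
 * transfer to sun4i_ss_cipher_poll().
 */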
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

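/*
 * tfm initialization: reset the tfm context, keep a reference to the
 * sun4i_ss device found in the algorithm template and reserve room for
 * the per-request context.
 */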
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	op->ss = algt->ss;

	tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

	return 0;
}

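/*
 * Usage sketch (not part of this driver): the setkey handlers below and the
 * encrypt/decrypt handlers above are reached through the generic ablkcipher
 * API, roughly as in the hypothetical caller shown here (error handling
 * omitted):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, 16);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_ablkcipher_encrypt(req);
 */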
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the DES key, prepare the mode to be used */
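/*
 * Note: des_ekey() is only used to detect weak keys (when the tfm asks for
 * that check); the expanded key in tmp is otherwise discarded, since the
 * hardware works on the raw key.
 */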
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_ablkcipher_get_flags(tfm);

	ret = des_ekey(tmp, key);
	if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}