// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"

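/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * algorithms below are registered, a consumer reaches them through the
 * generic skcipher API. Buffer names (src_sg, dst_sg, len) and sizes
 * here are placeholders, not values taken from this file.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 key[16], iv[16];		// AES-128-CBC, for example
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */
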
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	void *backup_iv = NULL;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

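	/*
	 * For CBC decryption the skcipher API expects areq->iv to be
	 * updated to the last ciphertext block so that chained requests
	 * work. With in-place operation that block can be overwritten by
	 * plaintext, so back it up before feeding the engine.
	 */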
	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		backup_iv = kzalloc(ivsize, GFP_KERNEL);
		if (!backup_iv)
			return -ENOMEM;
		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

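	/*
	 * Main polling loop: push up to rx_cnt 4-byte words from the source
	 * SG into the RX FIFO, read the FIFO control/status register to
	 * learn how much room (rx_cnt) and data (tx_cnt) the engine has,
	 * then drain up to tx_cnt words from the TX FIFO into the
	 * destination SG. Loop until the whole output has been read.
	 */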
	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	/* kfree_sensitive(NULL) is a no-op, so this also covers error paths */
	kfree_sensitive(backup_iv);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	int err;

	skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(&ctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&ctx->fallback_req);

	return err;
}

/* Generic function that supports SGs with sizes not multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	void *backup_iv = NULL;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;

	/*
	 * if we have only SGs with size multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

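	/*
	 * Dispatch: ORing length and offset and testing the low two bits
	 * catches any SG entry that is not 4-byte sized and aligned. Fully
	 * word-aligned requests take the optimized path; requests that are
	 * not a multiple of the block size must use the software fallback;
	 * everything else is handled below by buffering partial words in
	 * ss->buf and ss->bufo.
	 */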
	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);

	if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
		backup_iv = kzalloc(ivsize, GFP_KERNEL);
		if (!backup_iv)
			return -ENOMEM;
		scatterwalk_map_and_copy(backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0);
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;

	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; here todo is in bytes. Once
				 * the copy brings buf to a multiple of 4, we
				 * must be able to write all of it in one
				 * pass, which is why we min() with rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(ss->buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, ss->buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		if (!tx_cnt)
			continue;
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);

		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as possible
			 * so that the device is emptied.
			 */
			readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy? No more than
				 * what remains in the SG, and no more than
				 * what remains in the buffer; no need to
				 * test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, ss->bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
		sg_miter_stop(&mo);
	}
	if (areq->iv) {
		if (mode & SS_DECRYPTION) {
			memcpy(areq->iv, backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst,
						 areq->cryptlen - ivsize,
						 ivsize, 0);
		}
	}

release_ss:
	/* kfree_sensitive(NULL) is a no-op, so this also covers error paths */
	kfree_sensitive(backup_iv);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

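/*
 * The wrappers below only differ in the mode word they program into
 * SS_CTL: an algorithm selector (SS_OP_AES/SS_OP_DES/SS_OP_3DES), a
 * chaining mode (SS_CBC or SS_ECB), the SS_ENABLED bit, a direction
 * (SS_ENCRYPTION or SS_DECRYPTION) and the key-size bits stored in
 * op->keymode by setkey. The actual work is done by
 * sun4i_ss_cipher_poll().
 */
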
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);
	int err;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

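	/*
	 * The fallback skcipher_request is embedded in our request context,
	 * so the request size advertised to the crypto core must include
	 * the fallback's own request size as well.
	 */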
	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx) +
				    crypto_skcipher_reqsize(op->fallback_tfm));

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;

	return 0;
error_pm:
	crypto_free_skcipher(op->fallback_tfm);
	return err;
}
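
/*
 * Note: sun4i_ss_cipher_init() takes a runtime PM reference so that the
 * device stays powered while the tfm is alive; the matching
 * pm_runtime_put() is in sun4i_ss_cipher_exit() below.
 */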

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(op->fallback_tfm);
	pm_runtime_put(op->ss->dev);
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}
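
/*
 * In each setkey above and below, the CRYPTO_TFM_REQ_MASK flags of the
 * hardware tfm are mirrored onto the fallback tfm before its key is
 * set, so the fallback enforces the same request policy (for example
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS).
 */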

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}