/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */
#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

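/* Invoke the completion callback of the request that originated this job. */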
static void rk_crypto_complete(struct crypto_async_request *base, int err)
{
	if (base->complete)
		base->complete(base, err);
}

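/*
 * Reject requests whose length is not aligned to the device's minimum
 * alignment, then hand the request over to the driver queue.
 */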
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct ablkcipher_request *req)
{
	if (!IS_ALIGNED(req->nbytes, dev->align_size))
		return -EINVAL;
	else
		return dev->enqueue(dev, &req->base);
}

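/*
 * Validate the AES key length (128/192/256 bits) and write the key
 * directly into the AES key registers.
 */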
static int rk_aes_setkey(struct crypto_ablkcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, key, keylen);
	return 0;
}

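/*
 * Validate the DES/3DES key length, reject weak single-DES keys when the
 * transform asks for weak-key checking, and write the key into the TDES
 * key registers.
 */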
static int rk_tdes_setkey(struct crypto_ablkcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (keylen == DES_KEY_SIZE) {
		if (!des_ekey(tmp, key) &&
		    (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			return -EINVAL;
		}
	}

	ctx->keylen = keylen;
	memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, key, keylen);
	return 0;
}

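/*
 * Per-mode entry points: each one selects the hardware mode bits for the
 * requested cipher/chaining/direction combination and queues the request.
 */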
static int rk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct rk_crypto_info *dev = ctx->dev;

	ctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		    RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

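/*
 * Program the hardware for the current request: mode bits in the control
 * register, the IV, FIFO byte-swapping and interrupt enables, using either
 * the TDES or the AES unit depending on the algorithm's block size.
 */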
static void rk_ablk_hw_init(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 ivsize, block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);
	ivsize = crypto_ablkcipher_ivsize(cipher);

	if (block == DES_BLOCK_SIZE) {
		ctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			     RK_CRYPTO_TDES_BYTESWAP_KEY |
			     RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, req->info, ivsize);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		ctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			     RK_CRYPTO_AES_KEY_CHANGE |
			     RK_CRYPTO_AES_BYTESWAP_KEY |
			     RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			ctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			ctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, ctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, req->info, ivsize);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

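/*
 * Load the DMA source/destination addresses and the transfer length in
 * 32-bit words, then set the start bit; the same bit is also written into
 * the upper half of the register, which on Rockchip control registers
 * acts as a write-enable mask.
 */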
static void crypto_dma_start(struct rk_crypto_info *dev)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, dev->addr_in);
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, dev->count / 4);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, dev->addr_out);
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

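/*
 * Prepare the next chunk of data: for decryption in chain mode, save the
 * IV the next chunk will need before the source may be overwritten, then
 * hand the scatterlists to load_data and start the DMA engine.
 */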
static int rk_set_data_start(struct rk_crypto_info *dev)
{
	int err;
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
	u8 *src_last_blk = page_address(sg_page(dev->sg_src)) +
		dev->sg_src->offset + dev->sg_src->length - ivsize;

	/* Store the IV that needs to be updated in chain mode.
	 * And update the IV buffer to contain the next IV for decryption mode.
	 */
	if (ctx->mode & RK_CRYPTO_DEC) {
		memcpy(ctx->iv, src_last_blk, ivsize);
		sg_pcopy_to_buffer(dev->first, dev->src_nents, req->info,
				   ivsize, dev->total - ivsize);
	}

	err = dev->load_data(dev, dev->sg_src, dev->sg_dst);
	if (!err)
		crypto_dma_start(dev);
	return err;
}

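/*
 * Set up the per-request bookkeeping (byte counts, scatterlists), program
 * the hardware, and start the first transfer with the device lock held.
 */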
static int rk_ablk_start(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	unsigned long flags;
	int err = 0;

	dev->left_bytes = req->nbytes;
	dev->total = req->nbytes;
	dev->sg_src = req->src;
	dev->first = req->src;
	dev->src_nents = sg_nents(req->src);
	dev->sg_dst = req->dst;
	dev->dst_nents = sg_nents(req->dst);
	dev->aligned = 1;

	spin_lock_irqsave(&dev->lock, flags);
	rk_ablk_hw_init(dev);
	err = rk_set_data_start(dev);
	spin_unlock_irqrestore(&dev->lock, flags);
	return err;
}

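/*
 * After encryption, copy the last ciphertext block back into req->info so
 * the caller receives the next chaining IV.
 */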
static void rk_iv_copyback(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);

	/* Update the IV buffer to contain the next IV for encryption mode. */
	if (!(ctx->mode & RK_CRYPTO_DEC)) {
		if (dev->aligned) {
			memcpy(req->info, sg_virt(dev->sg_dst) +
				dev->sg_dst->length - ivsize, ivsize);
		} else {
			memcpy(req->info, dev->addr_vir +
				dev->count - ivsize, ivsize);
		}
	}
}

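/*
 * Between chunks, reload the hardware IV registers with the chaining value
 * for the next scatterlist segment: the previously saved last ciphertext
 * block for decryption, or the block just produced for encryption.
 */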
static void rk_update_iv(struct rk_crypto_info *dev)
{
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 ivsize = crypto_ablkcipher_ivsize(tfm);
	u8 *new_iv = NULL;

	if (ctx->mode & RK_CRYPTO_DEC) {
		new_iv = ctx->iv;
	} else {
		new_iv = page_address(sg_page(dev->sg_dst)) +
			 dev->sg_dst->offset + dev->sg_dst->length - ivsize;
	}

	if (ivsize == DES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_IV_0, new_iv, ivsize);
	else if (ivsize == AES_BLOCK_SIZE)
		memcpy_toio(dev->reg + RK_CRYPTO_AES_IV_0, new_iv, ivsize);
}

/* return:
 *	true	some error occurred
 *	false	no error, continue
 */
static int rk_ablk_rx(struct rk_crypto_info *dev)
{
	int err = 0;
	struct ablkcipher_request *req =
		ablkcipher_request_cast(dev->async_req);

	dev->unload_data(dev);
	if (!dev->aligned) {
		if (!sg_pcopy_from_buffer(req->dst, dev->dst_nents,
					  dev->addr_vir, dev->count,
					  dev->total - dev->left_bytes -
					  dev->count)) {
			err = -EINVAL;
			goto out_rx;
		}
	}
	if (dev->left_bytes) {
		rk_update_iv(dev);
		if (dev->aligned) {
			if (sg_is_last(dev->sg_src)) {
				dev_err(dev->dev, "[%s:%d] Lack of data\n",
					__func__, __LINE__);
				err = -ENOMEM;
				goto out_rx;
			}
			dev->sg_src = sg_next(dev->sg_src);
			dev->sg_dst = sg_next(dev->sg_dst);
		}
		err = rk_set_data_start(dev);
	} else {
		rk_iv_copyback(dev);
		/* the whole calculation is done without any error */
		dev->complete(dev->async_req, 0);
		tasklet_schedule(&dev->queue_task);
	}
out_rx:
	return err;
}

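/*
 * Per-transform init: bind the transform to its crypto device, install the
 * driver callbacks, allocate a bounce page for unaligned requests, and
 * enable the device clocks.
 */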
static int rk_ablk_cra_init(struct crypto_tfm *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.crypto);

	ctx->dev = algt->dev;
	ctx->dev->align_size = crypto_tfm_alg_alignmask(tfm) + 1;
	ctx->dev->start = rk_ablk_start;
	ctx->dev->update = rk_ablk_rx;
	ctx->dev->complete = rk_crypto_complete;
	ctx->dev->addr_vir = (char *)__get_free_page(GFP_KERNEL);

	return ctx->dev->addr_vir ? ctx->dev->enable_clk(ctx->dev) : -ENOMEM;
}

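/* Per-transform teardown: release the bounce page and the device clocks. */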
static void rk_ablk_cra_exit(struct crypto_tfm *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	free_page((unsigned long)ctx->dev->addr_vir);
	ctx->dev->disable_clk(ctx->dev);
}

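/* Algorithm templates registered with the kernel crypto API. */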
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-rk",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= rk_ablk_cra_init,
		.cra_exit		= rk_ablk_cra_exit,
		.cra_u.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= rk_aes_setkey,
			.encrypt	= rk_aes_ecb_encrypt,
			.decrypt	= rk_aes_ecb_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-rk",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
		.cra_alignmask		= 0x0f,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= rk_ablk_cra_init,
		.cra_exit		= rk_ablk_cra_exit,
		.cra_u.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= rk_aes_setkey,
			.encrypt	= rk_aes_cbc_encrypt,
			.decrypt	= rk_aes_cbc_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name		= "ecb(des)",
		.cra_driver_name	= "ecb-des-rk",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= DES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
		.cra_alignmask		= 0x07,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= rk_ablk_cra_init,
		.cra_exit		= rk_ablk_cra_exit,
		.cra_u.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= rk_tdes_setkey,
			.encrypt	= rk_des_ecb_encrypt,
			.decrypt	= rk_des_ecb_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name		= "cbc(des)",
		.cra_driver_name	= "cbc-des-rk",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= DES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
		.cra_alignmask		= 0x07,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= rk_ablk_cra_init,
		.cra_exit		= rk_ablk_cra_exit,
		.cra_u.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.setkey		= rk_tdes_setkey,
			.encrypt	= rk_des_cbc_encrypt,
			.decrypt	= rk_des_cbc_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name		= "ecb(des3_ede)",
		.cra_driver_name	= "ecb-des3-ede-rk",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= DES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
		.cra_alignmask		= 0x07,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= rk_ablk_cra_init,
		.cra_exit		= rk_ablk_cra_exit,
		.cra_u.ablkcipher	= {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.setkey		= rk_tdes_setkey,
			.encrypt	= rk_des3_ede_ecb_encrypt,
			.decrypt	= rk_des3_ede_ecb_decrypt,
		}
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.crypto = {
		.cra_name		= "cbc(des3_ede)",
		.cra_driver_name	= "cbc-des3-ede-rk",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC,
		.cra_blocksize		= DES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct rk_cipher_ctx),
		.cra_alignmask		= 0x07,
		.cra_type		= &crypto_ablkcipher_type,
		.cra_module		= THIS_MODULE,
		.cra_init		= rk_ablk_cra_init,
		.cra_exit		= rk_ablk_cra_exit,
		.cra_u.ablkcipher	= {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.setkey		= rk_tdes_setkey,
			.encrypt	= rk_des3_ede_cbc_encrypt,
			.decrypt	= rk_des3_ede_cbc_decrypt,
		}
	}
};