/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static LIST_HEAD(ablkcipher_algs);

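/*
 * DMA completion callback. Tears down the DMA mappings set up in
 * qce_ablkcipher_async_req_handle(), frees the destination sg table,
 * checks the hardware status register and completes the request.
 */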
static void qce_ablkcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);

	qce->async_req_done(tmpl->qce, error);
}

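/*
 * Maps the source and destination scatterlists, appends the result
 * buffer to the destination table, sets up the DMA descriptors and
 * kicks off the crypto engine for one ablkcipher request.
 */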
static int
qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int ret;

	rctx->iv = req->info;
	rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	rctx->cryptlen = req->nbytes;

	diff_dst = (req->src != req->dst);
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG entries.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG entries.\n");
		/* dst_nents already holds a negative errno; don't negate it */
		return rctx->dst_nents;
	}

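	/* reserve one extra entry in the dst table for the result buffer sg */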
	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	ret = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!ret) {
		/* dma_map_sg() returns 0 on failure, not a negative errno */
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!ret) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
			       rctx->dst_sg, rctx->dst_nents,
			       qce_ablkcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

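/*
 * Sets the cipher key. AES keys the hardware cannot handle (anything
 * other than 128 and 256 bits, e.g. AES-192) are handed to the software
 * fallback; DES keys are rejected as weak when the transform asks for
 * weak-key checking.
 */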
static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	if (IS_AES(flags)) {
		switch (keylen) {
		case AES_KEYSIZE_128:
		case AES_KEYSIZE_256:
			break;
		default:
			goto fallback;
		}
	} else if (IS_DES(flags)) {
		u32 tmp[DES_EXPKEY_WORDS];

		ret = des_ekey(tmp, key);
		if (!ret && crypto_ablkcipher_get_flags(ablk) &
		    CRYPTO_TFM_REQ_WEAK_KEY)
			goto weakkey;
	}

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
fallback:
	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
weakkey:
	crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
	return -EINVAL;
}

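/*
 * Common encrypt/decrypt path. Requests with AES key sizes the hardware
 * does not support are processed synchronously through the software
 * fallback; everything else is queued to the qce engine.
 */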
static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_tfm *tfm =
			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;

	if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
	    ctx->enc_keylen != AES_KEYSIZE_256) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

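/* Encrypt/decrypt entry points; both funnel into qce_ablkcipher_crypt(). */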
static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 1);
}

static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	return qce_ablkcipher_crypt(req, 0);
}

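/*
 * Allocates a synchronous software fallback skcipher with the same
 * algorithm name, used for key sizes the hardware cannot process.
 */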
static int qce_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_ASYNC |
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	return 0;
}

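/* Releases the software fallback transform. */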
static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

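/*
 * Descriptor for one hardware-backed algorithm: crypto API names plus
 * block, IV and key size limits used to fill in a crypto_alg at
 * registration time.
 */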
struct qce_ablkcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_ablkcipher_def ablkcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};

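/*
 * Builds a crypto_alg from one table entry, registers it with the
 * crypto API and links the template into ablkcipher_algs for later
 * unregistration.
 */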
static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct crypto_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.crypto;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->cra_blocksize = def->blocksize;
	alg->cra_ablkcipher.ivsize = def->ivsize;
	alg->cra_ablkcipher.min_keysize = def->min_keysize;
	alg->cra_ablkcipher.max_keysize = def->max_keysize;
	alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
	alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
	alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;

	alg->cra_priority = 300;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
	alg->cra_alignmask = 0;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_module = THIS_MODULE;
	alg->cra_init = qce_ablkcipher_init;
	alg->cra_exit = qce_ablkcipher_exit;
	INIT_LIST_HEAD(&alg->cra_list);

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_alg(alg);
	if (ret) {
		kfree(tmpl);
		dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
		return ret;
	}

	list_add_tail(&tmpl->entry, &ablkcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
	return 0;
}

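/* Unregisters and frees every algorithm this driver has registered. */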
static void qce_ablkcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
		crypto_unregister_alg(&tmpl->alg.crypto);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

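/*
 * Registers all algorithms from ablkcipher_def[]; on any failure the
 * ones already registered are torn down again.
 */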
static int qce_ablkcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
		ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_ablkcipher_unregister(qce);
	return ret;
}

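/*
 * Entry points through which the qce core driver registers the
 * ablkcipher algorithms and dispatches queued requests.
 */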
const struct qce_algo_ops ablkcipher_ops = {
	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	.register_algs = qce_ablkcipher_register,
	.unregister_algs = qce_ablkcipher_unregister,
	.async_req_handle = qce_ablkcipher_async_req_handle,
};