// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>

#include "cipher.h"

static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN;
module_param(aes_sw_max_len, uint, 0644);
MODULE_PARM_DESC(aes_sw_max_len,
		 "Only use hardware for AES requests larger than this "
		 "[0=always use hardware; anything <16 breaks AES-GCM; default="
		 __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]");

static LIST_HEAD(skcipher_algs);

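/*
 * DMA completion callback: stop the DMA channel, unmap the scatterlists,
 * free the destination sg table, copy the updated counter/IV out of the
 * hardware result dump, and complete the crypto request.
 */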
static void qce_skcipher_done(void *data)
{
	struct crypto_async_request *async_req = data;
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	struct qce_result_dump *result_buf = qce->dma.result_buf;
	enum dma_data_direction dir_src, dir_dst;
	u32 status;
	int error;
	bool diff_dst;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	error = qce_dma_terminate_all(&qce->dma);
	if (error)
		dev_dbg(qce->dev, "skcipher dma termination error (%d)\n",
			error);

	if (diff_dst)
		dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src);
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);

	sg_free_table(&rctx->dst_tbl);

	error = qce_check_status(qce, &status);
	if (error < 0)
		dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);

	memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize);
	qce->async_req_done(tmpl->qce, error);
}

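/*
 * Prepare one skcipher request and hand it to the QCE DMA engine.
 *
 * The destination sg table gets one extra entry so the hardware result
 * dump can ride behind the payload:
 *
 *	dst_tbl: [ req->dst payload (cryptlen) ][ result_sg (QCE_RESULT_BUF_SZ) ]
 *
 * That is why dst_nents is incremented by one below, and why the in-place
 * (src == dst) path uses dst_nents - 1 source entries.
 */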
static int
qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	enum dma_data_direction dir_src, dir_dst;
	struct scatterlist *sg;
	bool diff_dst;
	gfp_t gfp;
	int dst_nents, src_nents, ret;

	rctx->iv = req->iv;
	rctx->ivsize = crypto_skcipher_ivsize(skcipher);
	rctx->cryptlen = req->cryptlen;

	diff_dst = (req->src != req->dst) ? true : false;
	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
	dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;

	rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (diff_dst)
		rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	else
		rctx->dst_nents = rctx->src_nents;
	if (rctx->src_nents < 0) {
		dev_err(qce->dev, "Invalid number of src SG.\n");
		return rctx->src_nents;
	}
	if (rctx->dst_nents < 0) {
		dev_err(qce->dev, "Invalid number of dst SG.\n");
		/* propagate the negative errno, not its absolute value */
		return rctx->dst_nents;
	}

	rctx->dst_nents += 1;

	gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
						GFP_KERNEL : GFP_ATOMIC;

	ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
	if (ret)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg,
			     QCE_RESULT_BUF_SZ);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto error_free;
	}

	sg_mark_end(sg);
	rctx->dst_sg = rctx->dst_tbl.sgl;

	dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
	if (!dst_nents) {
		/* dma_map_sg() returns 0 on failure, never a negative value */
		ret = -EIO;
		goto error_free;
	}

	if (diff_dst) {
		src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src);
		if (!src_nents) {
			ret = -EIO;
			goto error_unmap_dst;
		}
		rctx->src_sg = req->src;
	} else {
		rctx->src_sg = rctx->dst_sg;
		src_nents = dst_nents - 1;
	}

	ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents,
			       rctx->dst_sg, dst_nents,
			       qce_skcipher_done, async_req);
	if (ret)
		goto error_unmap_src;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_src:
	if (diff_dst)
		dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src);
error_unmap_dst:
	dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst);
error_free:
	sg_free_table(&rctx->dst_tbl);
	return ret;
}

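/*
 * AES setkey: only 128- and 256-bit keys (per half for XTS) are copied for
 * the engine; other lengths, notably AES-192, are served entirely by the
 * fallback. The fallback tfm is keyed unconditionally so that
 * qce_skcipher_crypt() can divert any request to it.
 */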
static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key,
				 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned long flags = to_cipher_tmpl(ablk)->alg_flags;
	int ret;

	if (!key || !keylen)
		return -EINVAL;

	switch (IS_XTS(flags) ? keylen >> 1 : keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_256:
		memcpy(ctx->enc_key, key, keylen);
		break;
	}

	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
	if (!ret)
		ctx->enc_keylen = keylen;
	return ret;
}

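/*
 * DES/3DES setkey: verify_skcipher_des_key() and verify_skcipher_des3_key()
 * reject weak keys before the key material is stashed for the hardware;
 * these algorithms have no software fallback.
 */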
static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key,
			  unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key,
			   unsigned int keylen)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk);
	int err;

	err = verify_skcipher_des3_key(ablk, key);
	if (err)
		return err;

	ctx->enc_keylen = keylen;
	memcpy(ctx->enc_key, key, keylen);
	return 0;
}

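/*
 * Dispatch one request either to the hardware queue or to the software
 * fallback. AES requests fall back when the key length is unsupported by
 * the engine (anything but 128/256 bit), when the request is small enough
 * (<= aes_sw_max_len) that software beats the offload setup cost, or when
 * an XTS request would trip the sector-alignment erratum described below.
 */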
static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
	int keylen;
	int ret;

	rctx->flags = tmpl->alg_flags;
	rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
	keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen;

	/*
	 * qce hangs when an AES-XTS request is longer than QCE_SECTOR_SIZE
	 * and not a multiple of it; pass such requests to the fallback.
	 */
	if (IS_AES(rctx->flags) &&
	    (((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) ||
	      req->cryptlen <= aes_sw_max_len) ||
	     (IS_XTS(rctx->flags) && req->cryptlen > QCE_SECTOR_SIZE &&
	      req->cryptlen % QCE_SECTOR_SIZE))) {
		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);
		ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				crypto_skcipher_decrypt(&rctx->fallback_req);
		return ret;
	}

	return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
}

static int qce_skcipher_encrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 1);
}

static int qce_skcipher_decrypt(struct skcipher_request *req)
{
	return qce_skcipher_crypt(req, 0);
}

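/*
 * Two tfm init paths: DES/3DES never use a fallback, so their request
 * context stops short of the embedded fallback_req; AES allocates a
 * fallback cipher and sizes the request context to append its request.
 */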
static int qce_skcipher_init(struct crypto_skcipher *tfm)
{
	/* take the size without the fallback skcipher_request at the end */
	crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx,
						  fallback_req));
	return 0;
}

static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
					      0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) +
					 crypto_skcipher_reqsize(ctx->fallback));
	return 0;
}

static void qce_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}

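/* Static description of one algorithm variant exposed by this driver. */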
struct qce_skcipher_def {
	unsigned long flags;
	const char *name;
	const char *drv_name;
	unsigned int blocksize;
	unsigned int chunksize;
	unsigned int ivsize;
	unsigned int min_keysize;
	unsigned int max_keysize;
};

static const struct qce_skcipher_def skcipher_def[] = {
	{
		.flags		= QCE_ALG_AES | QCE_MODE_ECB,
		.name		= "ecb(aes)",
		.drv_name	= "ecb-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CBC,
		.name		= "cbc(aes)",
		.drv_name	= "cbc-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_CTR,
		.name		= "ctr(aes)",
		.drv_name	= "ctr-aes-qce",
		.blocksize	= 1,
		.chunksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_AES | QCE_MODE_XTS,
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-qce",
		.blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.min_keysize	= AES_MIN_KEY_SIZE * 2,
		.max_keysize	= AES_MAX_KEY_SIZE * 2,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_ECB,
		.name		= "ecb(des)",
		.drv_name	= "ecb-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_DES | QCE_MODE_CBC,
		.name		= "cbc(des)",
		.drv_name	= "cbc-des-qce",
		.blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.min_keysize	= DES_KEY_SIZE,
		.max_keysize	= DES_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_ECB,
		.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= 0,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
	{
		.flags		= QCE_ALG_3DES | QCE_MODE_CBC,
		.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des-qce",
		.blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.min_keysize	= DES3_EDE_KEY_SIZE,
		.max_keysize	= DES3_EDE_KEY_SIZE,
	},
};

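/*
 * Allocate a template, populate the skcipher_alg from the static
 * definition, wire up the setkey/init callbacks that match the algorithm
 * family, and register the result with the crypto API.
 */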
static int qce_skcipher_register_one(const struct qce_skcipher_def *def,
				       struct qce_device *qce)
{
	struct qce_alg_template *tmpl;
	struct skcipher_alg *alg;
	int ret;

	tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
	if (!tmpl)
		return -ENOMEM;

	alg = &tmpl->alg.skcipher;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);

	alg->base.cra_blocksize		= def->blocksize;
	alg->chunksize			= def->chunksize;
	alg->ivsize			= def->ivsize;
	alg->min_keysize		= def->min_keysize;
	alg->max_keysize		= def->max_keysize;
	alg->setkey			= IS_3DES(def->flags) ? qce_des3_setkey :
					  IS_DES(def->flags) ? qce_des_setkey :
					  qce_skcipher_setkey;
	alg->encrypt			= qce_skcipher_encrypt;
	alg->decrypt			= qce_skcipher_decrypt;

	alg->base.cra_priority		= 300;
	alg->base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY |
					  CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->base.cra_ctxsize		= sizeof(struct qce_cipher_ctx);
	alg->base.cra_alignmask		= 0;
	alg->base.cra_module		= THIS_MODULE;

	if (IS_AES(def->flags)) {
		alg->base.cra_flags    |= CRYPTO_ALG_NEED_FALLBACK;
		alg->init		= qce_skcipher_init_fallback;
		alg->exit		= qce_skcipher_exit;
	} else {
		alg->init		= qce_skcipher_init;
	}

	INIT_LIST_HEAD(&tmpl->entry);
	tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER;
	tmpl->alg_flags = def->flags;
	tmpl->qce = qce;

	ret = crypto_register_skcipher(alg);
	if (ret) {
		dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name);
		kfree(tmpl);
		return ret;
	}

	list_add_tail(&tmpl->entry, &skcipher_algs);
	dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name);
	return 0;
}

static void qce_skcipher_unregister(struct qce_device *qce)
{
	struct qce_alg_template *tmpl, *n;

	list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&tmpl->alg.skcipher);
		list_del(&tmpl->entry);
		kfree(tmpl);
	}
}

static int qce_skcipher_register(struct qce_device *qce)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) {
		ret = qce_skcipher_register_one(&skcipher_def[i], qce);
		if (ret)
			goto err;
	}

	return 0;
err:
	qce_skcipher_unregister(qce);
	return ret;
}

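/* Entry points through which the qce core drives this algorithm family. */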
const struct qce_algo_ops skcipher_ops = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.register_algs = qce_skcipher_register,
	.unregister_algs = qce_skcipher_unregister,
	.async_req_handle = qce_skcipher_async_req_handle,
};
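
/*
 * Illustrative only, not part of this driver: a kernel client reaches the
 * algorithms registered above through the generic skcipher API. A minimal
 * sketch with error handling elided (an async implementation such as this
 * one returns -EINPROGRESS, so real callers pair the request with a
 * crypto_wait_req()/DECLARE_CRYPTO_WAIT() completion):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */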