/* Algorithms supported by virtio crypto device
 *
 * Authors: Gonglei <arei.gonglei@huawei.com>
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"


struct virtio_crypto_ablkcipher_ctx {
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};

/*
 * The algs_lock protects the below global virtio_crypto_active_devs
 * and crypto algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int virtio_crypto_active_devs;
static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err);

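/*
 * Completion callback, run from the data virtqueue's interrupt path:
 * map the virtio status code onto a Linux errno and finalize the
 * symmetric crypto request.
 */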
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct ablkcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->ablkcipher_req;
		virtio_crypto_ablkcipher_finalize_req(vc_sym_req,
							ablk_req, error);
	}
}

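/* Sum the lengths of all entries in a scatterlist */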
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}

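/*
 * Only AES-CBC is supported: the key length selects AES-128/192/256,
 * anything else is rejected.
 */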
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

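/*
 * Create a session on the device's control virtqueue: send the control
 * header plus the cipher key as out buffers, then busy-wait for the
 * device to return the status and the new session id in the in buffer.
 */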
static int virtio_crypto_alg_ablkcipher_init_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmalloc(keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;

	memcpy(cipher_key, key, keylen);

	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kzfree(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(vcrypto->input.status));
		kzfree(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kzfree(cipher_key);
	return 0;
}

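/*
 * Tear down a session over the control virtqueue: send the destroy
 * request as the out buffer and busy-wait for the status in buffer.
 */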
static int virtio_crypto_alg_ablkcipher_close_session(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;

	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
			num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);

	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			vcrypto->ctrl_status.status,
			destroy_session->session_id);

		return -EINVAL;
	}
	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}

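/*
 * Validate the key and create a pair of device sessions, one for
 * encryption and one for decryption; on failure the encryption
 * session is closed again so no half-initialized state remains.
 */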
static int virtio_crypto_alg_ablkcipher_init_sessions(
		struct virtio_crypto_ablkcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		goto bad_key;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		goto bad_key;

	/* Create encryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_ablkcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;

bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/* Note: kernel crypto API realization */
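/*
 * setkey: on the first key, bind the tfm to a virtio crypto device
 * (preferring one on the current NUMA node); on rekeying, drop the
 * old sessions before creating new ones.
 */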
static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
		virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_ablkcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}

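/*
 * Build and submit one data-queue request. Descriptor layout:
 * out: op header, IV, source scatterlist entries;
 * in:  destination scatterlist entries, status byte.
 */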
static int
__virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct ablkcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = vc_sym_req->ablkcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	int i;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;

	src_nents = sg_nents_for_len(req->src, req->nbytes);
	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kzalloc_node(sg_total * sizeof(*sgs), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}

	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->nbytes);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->nbytes, dst_len);

	if (unlikely(req->nbytes + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);

	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->info, ivsize);
	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (i = 0; i < src_nents; i++)
		sgs[num_out++] = &req->src[i];

	/* Destination data */
	for (i = 0; i < dst_nents; i++)
		sgs[num_out + num_in++] = &req->dst[i];

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kzfree(iv);
free:
	kzfree(req_data);
	kfree(sgs);
	return err;
}

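/*
 * Entry points from the crypto API: queue the request on the crypto
 * engine of the chosen data virtqueue; the engine later calls
 * virtio_crypto_ablkcipher_crypt_req() to submit it to the device.
 */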
static int virtio_crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->nbytes)
		return 0;
	if (req->nbytes % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

static int virtio_crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->nbytes)
		return 0;
	if (req->nbytes % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->ablkcipher_ctx = ctx;
	vc_sym_req->ablkcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_cipher_request_to_engine(data_vq->engine, req);
}

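/*
 * tfm init/exit: reserve per-request context space for
 * struct virtio_crypto_sym_request and tear down sessions on exit.
 */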
static int virtio_crypto_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct virtio_crypto_sym_request);
	ctx->tfm = tfm;

	return 0;
}

static void virtio_crypto_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct virtio_crypto_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_ablkcipher_close_session(ctx, 1);
	virtio_crypto_alg_ablkcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}

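/* Called by the crypto engine to actually submit a queued request */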
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine,
	struct ablkcipher_request *req)
{
	struct virtio_crypto_sym_request *vc_sym_req =
				ablkcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_ablkcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}

static void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct ablkcipher_request *req,
	int err)
{
	crypto_finalize_cipher_request(vc_sym_req->base.dataq->engine,
					req, err);
	kzfree(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);
}

static struct crypto_alg virtio_crypto_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "virtio_crypto_aes_cbc",
	.cra_priority = 150,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_module = THIS_MODULE,
	.cra_type = &crypto_ablkcipher_type,
	.cra_init = virtio_crypto_ablkcipher_init,
	.cra_exit = virtio_crypto_ablkcipher_exit,
	.cra_u = {
	   .ablkcipher = {
			.setkey = virtio_crypto_ablkcipher_setkey,
			.decrypt = virtio_crypto_ablkcipher_decrypt,
			.encrypt = virtio_crypto_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
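
/*
 * Usage sketch (illustrative only, not part of this driver): a kernel
 * consumer reaches this implementation through the generic crypto API
 * rather than by calling the functions above directly, roughly:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req;
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);
 *
 * Error handling and asynchronous completion are omitted.
 */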
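/*
 * Register/unregister the algorithms exactly once, no matter how many
 * virtio crypto devices are active; algs_lock guards the device count.
 */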
int virtio_crypto_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++virtio_crypto_active_devs != 1)
		goto unlock;

	ret = crypto_register_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));
	if (ret)
		virtio_crypto_active_devs--;

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void virtio_crypto_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--virtio_crypto_active_devs != 0)
		goto unlock;

	crypto_unregister_algs(virtio_crypto_algs,
			ARRAY_SIZE(virtio_crypto_algs));

unlock:
	mutex_unlock(&algs_lock);
}