1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Driver for Virtio crypto device.
3 *
4 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
5 */
6
7 #include <linux/err.h>
8 #include <linux/module.h>
9 #include <linux/virtio_config.h>
10 #include <linux/cpu.h>
11
12 #include <uapi/linux/virtio_crypto.h>
13 #include "virtio_crypto_common.h"
14
15
16 void
virtcrypto_clear_request(struct virtio_crypto_request * vc_req)17 virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
18 {
19 if (vc_req) {
20 kfree_sensitive(vc_req->req_data);
21 kfree(vc_req->sgs);
22 }
23 }
24
virtcrypto_done_task(unsigned long data)25 static void virtcrypto_done_task(unsigned long data)
26 {
27 struct data_queue *data_vq = (struct data_queue *)data;
28 struct virtqueue *vq = data_vq->vq;
29 struct virtio_crypto_request *vc_req;
30 unsigned int len;
31
32 do {
33 virtqueue_disable_cb(vq);
34 while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
35 if (vc_req->alg_cb)
36 vc_req->alg_cb(vc_req, len);
37 }
38 } while (!virtqueue_enable_cb(vq));
39 }
40
virtcrypto_dataq_callback(struct virtqueue * vq)41 static void virtcrypto_dataq_callback(struct virtqueue *vq)
42 {
43 struct virtio_crypto *vcrypto = vq->vdev->priv;
44 struct data_queue *dq = &vcrypto->data_vq[vq->index];
45
46 tasklet_schedule(&dq->done_task);
47 }
48
virtcrypto_find_vqs(struct virtio_crypto * vi)49 static int virtcrypto_find_vqs(struct virtio_crypto *vi)
50 {
51 vq_callback_t **callbacks;
52 struct virtqueue **vqs;
53 int ret = -ENOMEM;
54 int i, total_vqs;
55 const char **names;
56 struct device *dev = &vi->vdev->dev;
57
58 /*
59 * We expect 1 data virtqueue, followed by
60 * possible N-1 data queues used in multiqueue mode,
61 * followed by control vq.
62 */
63 total_vqs = vi->max_data_queues + 1;
64
65 /* Allocate space for find_vqs parameters */
66 vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
67 if (!vqs)
68 goto err_vq;
69 callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
70 if (!callbacks)
71 goto err_callback;
72 names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
73 if (!names)
74 goto err_names;
75
76 /* Parameters for control virtqueue */
77 callbacks[total_vqs - 1] = NULL;
78 names[total_vqs - 1] = "controlq";
79
80 /* Allocate/initialize parameters for data virtqueues */
81 for (i = 0; i < vi->max_data_queues; i++) {
82 callbacks[i] = virtcrypto_dataq_callback;
83 snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
84 "dataq.%d", i);
85 names[i] = vi->data_vq[i].name;
86 }
87
88 ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
89 if (ret)
90 goto err_find;
91
92 vi->ctrl_vq = vqs[total_vqs - 1];
93
94 for (i = 0; i < vi->max_data_queues; i++) {
95 spin_lock_init(&vi->data_vq[i].lock);
96 vi->data_vq[i].vq = vqs[i];
97 /* Initialize crypto engine */
98 vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
99 if (!vi->data_vq[i].engine) {
100 ret = -ENOMEM;
101 goto err_engine;
102 }
103 tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
104 (unsigned long)&vi->data_vq[i]);
105 }
106
107 kfree(names);
108 kfree(callbacks);
109 kfree(vqs);
110
111 return 0;
112
113 err_engine:
114 err_find:
115 kfree(names);
116 err_names:
117 kfree(callbacks);
118 err_callback:
119 kfree(vqs);
120 err_vq:
121 return ret;
122 }
123
virtcrypto_alloc_queues(struct virtio_crypto * vi)124 static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
125 {
126 vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
127 GFP_KERNEL);
128 if (!vi->data_vq)
129 return -ENOMEM;
130
131 return 0;
132 }
133
virtcrypto_clean_affinity(struct virtio_crypto * vi,long hcpu)134 static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
135 {
136 int i;
137
138 if (vi->affinity_hint_set) {
139 for (i = 0; i < vi->max_data_queues; i++)
140 virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
141
142 vi->affinity_hint_set = false;
143 }
144 }
145
virtcrypto_set_affinity(struct virtio_crypto * vcrypto)146 static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
147 {
148 int i = 0;
149 int cpu;
150
151 /*
152 * In single queue mode, we don't set the cpu affinity.
153 */
154 if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
155 virtcrypto_clean_affinity(vcrypto, -1);
156 return;
157 }
158
159 /*
160 * In multiqueue mode, we let the queue to be private to one cpu
161 * by setting the affinity hint to eliminate the contention.
162 *
163 * TODO: adds cpu hotplug support by register cpu notifier.
164 *
165 */
166 for_each_online_cpu(cpu) {
167 virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
168 if (++i >= vcrypto->max_data_queues)
169 break;
170 }
171
172 vcrypto->affinity_hint_set = true;
173 }
174
/* Free the per-data-queue bookkeeping array allocated by
 * virtcrypto_alloc_queues(). The virtqueues themselves must already
 * have been deleted by the caller (see virtcrypto_del_vqs()).
 */
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}
179
/*
 * Allocate the queue bookkeeping, discover the virtqueues, and apply
 * CPU affinity hints (under the CPU hotplug read lock).
 *
 * Return: 0 on success, negative errno on failure; the queue array is
 * freed again if virtqueue discovery fails.
 */
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		return ret;

	ret = virtcrypto_find_vqs(vi);
	if (ret) {
		virtcrypto_free_queues(vi);
		return ret;
	}

	cpus_read_lock();
	virtcrypto_set_affinity(vi);
	cpus_read_unlock();

	return 0;
}
204
virtcrypto_update_status(struct virtio_crypto * vcrypto)205 static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
206 {
207 u32 status;
208 int err;
209
210 virtio_cread_le(vcrypto->vdev,
211 struct virtio_crypto_config, status, &status);
212
213 /*
214 * Unknown status bits would be a host error and the driver
215 * should consider the device to be broken.
216 */
217 if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
218 dev_warn(&vcrypto->vdev->dev,
219 "Unknown status bits: 0x%x\n", status);
220
221 virtio_break_device(vcrypto->vdev);
222 return -EPERM;
223 }
224
225 if (vcrypto->status == status)
226 return 0;
227
228 vcrypto->status = status;
229
230 if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
231 err = virtcrypto_dev_start(vcrypto);
232 if (err) {
233 dev_err(&vcrypto->vdev->dev,
234 "Failed to start virtio crypto device.\n");
235
236 return -EPERM;
237 }
238 dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
239 } else {
240 virtcrypto_dev_stop(vcrypto);
241 dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
242 }
243
244 return 0;
245 }
246
virtcrypto_start_crypto_engines(struct virtio_crypto * vcrypto)247 static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
248 {
249 int32_t i;
250 int ret;
251
252 for (i = 0; i < vcrypto->max_data_queues; i++) {
253 if (vcrypto->data_vq[i].engine) {
254 ret = crypto_engine_start(vcrypto->data_vq[i].engine);
255 if (ret)
256 goto err;
257 }
258 }
259
260 return 0;
261
262 err:
263 while (--i >= 0)
264 if (vcrypto->data_vq[i].engine)
265 crypto_engine_exit(vcrypto->data_vq[i].engine);
266
267 return ret;
268 }
269
virtcrypto_clear_crypto_engines(struct virtio_crypto * vcrypto)270 static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
271 {
272 u32 i;
273
274 for (i = 0; i < vcrypto->max_data_queues; i++)
275 if (vcrypto->data_vq[i].engine)
276 crypto_engine_exit(vcrypto->data_vq[i].engine);
277 }
278
virtcrypto_del_vqs(struct virtio_crypto * vcrypto)279 static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
280 {
281 struct virtio_device *vdev = vcrypto->vdev;
282
283 virtcrypto_clean_affinity(vcrypto, -1);
284
285 vdev->config->del_vqs(vdev);
286
287 virtcrypto_free_queues(vcrypto);
288 }
289
virtcrypto_probe(struct virtio_device * vdev)290 static int virtcrypto_probe(struct virtio_device *vdev)
291 {
292 int err = -EFAULT;
293 struct virtio_crypto *vcrypto;
294 u32 max_data_queues = 0, max_cipher_key_len = 0;
295 u32 max_auth_key_len = 0;
296 u64 max_size = 0;
297 u32 cipher_algo_l = 0;
298 u32 cipher_algo_h = 0;
299 u32 hash_algo = 0;
300 u32 mac_algo_l = 0;
301 u32 mac_algo_h = 0;
302 u32 aead_algo = 0;
303 u32 crypto_services = 0;
304
305 if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
306 return -ENODEV;
307
308 if (!vdev->config->get) {
309 dev_err(&vdev->dev, "%s failure: config access disabled\n",
310 __func__);
311 return -EINVAL;
312 }
313
314 if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
315 /*
316 * If the accelerator is connected to a node with no memory
317 * there is no point in using the accelerator since the remote
318 * memory transaction will be very slow.
319 */
320 dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
321 return -EINVAL;
322 }
323
324 vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
325 dev_to_node(&vdev->dev));
326 if (!vcrypto)
327 return -ENOMEM;
328
329 virtio_cread_le(vdev, struct virtio_crypto_config,
330 max_dataqueues, &max_data_queues);
331 if (max_data_queues < 1)
332 max_data_queues = 1;
333
334 virtio_cread_le(vdev, struct virtio_crypto_config,
335 max_cipher_key_len, &max_cipher_key_len);
336 virtio_cread_le(vdev, struct virtio_crypto_config,
337 max_auth_key_len, &max_auth_key_len);
338 virtio_cread_le(vdev, struct virtio_crypto_config,
339 max_size, &max_size);
340 virtio_cread_le(vdev, struct virtio_crypto_config,
341 crypto_services, &crypto_services);
342 virtio_cread_le(vdev, struct virtio_crypto_config,
343 cipher_algo_l, &cipher_algo_l);
344 virtio_cread_le(vdev, struct virtio_crypto_config,
345 cipher_algo_h, &cipher_algo_h);
346 virtio_cread_le(vdev, struct virtio_crypto_config,
347 hash_algo, &hash_algo);
348 virtio_cread_le(vdev, struct virtio_crypto_config,
349 mac_algo_l, &mac_algo_l);
350 virtio_cread_le(vdev, struct virtio_crypto_config,
351 mac_algo_h, &mac_algo_h);
352 virtio_cread_le(vdev, struct virtio_crypto_config,
353 aead_algo, &aead_algo);
354
355 /* Add virtio crypto device to global table */
356 err = virtcrypto_devmgr_add_dev(vcrypto);
357 if (err) {
358 dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
359 goto free;
360 }
361 vcrypto->owner = THIS_MODULE;
362 vcrypto = vdev->priv = vcrypto;
363 vcrypto->vdev = vdev;
364
365 spin_lock_init(&vcrypto->ctrl_lock);
366
367 /* Use single data queue as default */
368 vcrypto->curr_queue = 1;
369 vcrypto->max_data_queues = max_data_queues;
370 vcrypto->max_cipher_key_len = max_cipher_key_len;
371 vcrypto->max_auth_key_len = max_auth_key_len;
372 vcrypto->max_size = max_size;
373 vcrypto->crypto_services = crypto_services;
374 vcrypto->cipher_algo_l = cipher_algo_l;
375 vcrypto->cipher_algo_h = cipher_algo_h;
376 vcrypto->mac_algo_l = mac_algo_l;
377 vcrypto->mac_algo_h = mac_algo_h;
378 vcrypto->hash_algo = hash_algo;
379 vcrypto->aead_algo = aead_algo;
380
381
382 dev_info(&vdev->dev,
383 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
384 vcrypto->max_data_queues,
385 vcrypto->max_cipher_key_len,
386 vcrypto->max_auth_key_len,
387 vcrypto->max_size);
388
389 err = virtcrypto_init_vqs(vcrypto);
390 if (err) {
391 dev_err(&vdev->dev, "Failed to initialize vqs.\n");
392 goto free_dev;
393 }
394
395 err = virtcrypto_start_crypto_engines(vcrypto);
396 if (err)
397 goto free_vqs;
398
399 virtio_device_ready(vdev);
400
401 err = virtcrypto_update_status(vcrypto);
402 if (err)
403 goto free_engines;
404
405 return 0;
406
407 free_engines:
408 virtcrypto_clear_crypto_engines(vcrypto);
409 free_vqs:
410 vcrypto->vdev->config->reset(vdev);
411 virtcrypto_del_vqs(vcrypto);
412 free_dev:
413 virtcrypto_devmgr_rm_dev(vcrypto);
414 free:
415 kfree(vcrypto);
416 return err;
417 }
418
virtcrypto_free_unused_reqs(struct virtio_crypto * vcrypto)419 static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
420 {
421 struct virtio_crypto_request *vc_req;
422 int i;
423 struct virtqueue *vq;
424
425 for (i = 0; i < vcrypto->max_data_queues; i++) {
426 vq = vcrypto->data_vq[i].vq;
427 while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
428 kfree(vc_req->req_data);
429 kfree(vc_req->sgs);
430 }
431 }
432 }
433
/*
 * Tear down a virtio crypto device. The ordering below is deliberate:
 * stop the logical device, kill the per-queue tasklets (so no completion
 * work runs afterwards), reset the device (host stops touching queues),
 * then reclaim requests, engines, virtqueues, and finally the device
 * record itself.
 */
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	/* Ensure no done_task is running or scheduled from here on. */
	for (i = 0; i < vcrypto->max_data_queues; i++)
		tasklet_kill(&vcrypto->data_vq[i].done_task);
	vdev->config->reset(vdev);
	/* Safe only after reset: the host no longer owns any buffers. */
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}
452
virtcrypto_config_changed(struct virtio_device * vdev)453 static void virtcrypto_config_changed(struct virtio_device *vdev)
454 {
455 struct virtio_crypto *vcrypto = vdev->priv;
456
457 virtcrypto_update_status(vcrypto);
458 }
459
460 #ifdef CONFIG_PM_SLEEP
/*
 * Suspend hook: quiesce the device and release queue resources.
 * Reset first so the host stops using the queues, then reclaim unused
 * requests, stop the logical device, and tear down engines and vqs.
 * virtcrypto_restore() rebuilds all of this on resume.
 */
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	vdev->config->reset(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}
474
virtcrypto_restore(struct virtio_device * vdev)475 static int virtcrypto_restore(struct virtio_device *vdev)
476 {
477 struct virtio_crypto *vcrypto = vdev->priv;
478 int err;
479
480 err = virtcrypto_init_vqs(vcrypto);
481 if (err)
482 return err;
483
484 err = virtcrypto_start_crypto_engines(vcrypto);
485 if (err)
486 goto free_vqs;
487
488 virtio_device_ready(vdev);
489
490 err = virtcrypto_dev_start(vcrypto);
491 if (err) {
492 dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
493 goto free_engines;
494 }
495
496 return 0;
497
498 free_engines:
499 virtcrypto_clear_crypto_engines(vcrypto);
500 free_vqs:
501 vcrypto->vdev->config->reset(vdev);
502 virtcrypto_del_vqs(vcrypto);
503 return err;
504 }
505 #endif
506
/* No optional virtio feature bits are negotiated by this driver. */
static const unsigned int features[] = {
	/* none */
};

/* Match any virtio device with the crypto device ID. */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name         = KBUILD_MODNAME,
	.driver.owner        = THIS_MODULE,
	.feature_table       = features,
	.feature_table_size  = ARRAY_SIZE(features),
	.id_table            = id_table,
	.probe               = virtcrypto_probe,
	.remove              = virtcrypto_remove,
	.config_changed      = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze              = virtcrypto_freeze,
	.restore             = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
537