// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
static struct virtio_transport virtio_transport; /* forward declaration */

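/* Per-device state. A single instance is published through the RCU-protected
 * the_virtio_vsock pointer once the virtqueues are ready; see
 * virtio_vsock_probe().
 */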
struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	struct sk_buff_head send_pkt_queue;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
	bool seqpacket_allow;
};

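/* Return the CID assigned to this guest, or VMADDR_CID_ANY if no device is
 * registered yet.
 */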
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

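/* Drain send_pkt_queue into the TX virtqueue. Runs from the workqueue; if
 * the virtqueue fills up, the remaining packets stay queued and are retried
 * once the device completes buffers (see virtio_transport_tx_work()).
 */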
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		struct sk_buff *skb;
		bool reply;

		skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
		if (!skb)
			break;

		virtio_transport_deliver_tap_pkt(skb);
		reply = virtio_vsock_skb_reply(skb);

		sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
		sgs[out_sg++] = &hdr;
		if (skb->len > 0) {
			sg_init_one(&buf, skb->data, skb->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

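/* Queue a packet for transmission and kick the send worker. Returns the
 * packet length on success, or -ENODEV if no device is available or the
 * packet is addressed to our own CID.
 */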
static int
virtio_transport_send_pkt(struct sk_buff *skb)
{
	struct virtio_vsock_hdr *hdr;
	struct virtio_vsock *vsock;
	int len = skb->len;

	hdr = virtio_vsock_hdr(skb);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(hdr->dst_cid) == vsock->guest_cid) {
		kfree_skb(skb);
		len = -ENODEV;
		goto out_rcu;
	}

	if (virtio_vsock_skb_reply(skb))
		atomic_inc(&vsock->queued_replies);

	virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

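/* Remove all packets queued for @vsk from send_pkt_queue. If purging frees
 * enough reply slots, restart RX processing, which may have been throttled
 * by virtio_transport_more_replies().
 */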
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	int cnt = 0, ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

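/* Refill the RX virtqueue with freshly allocated skbs until it is full or
 * allocation fails. Called with rx_lock held.
 */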
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
	struct scatterlist pkt, *p;
	struct virtqueue *vq;
	struct sk_buff *skb;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
		if (!skb)
			break;

		memset(skb->head, 0, VIRTIO_VSOCK_SKB_HEADROOM);
		sg_init_one(&pkt, virtio_vsock_hdr(skb), total_len);
		p = &pkt;
		ret = virtqueue_add_sgs(vq, &p, 0, 1, skb, GFP_KERNEL);
		if (ret < 0) {
			kfree_skb(skb);
			break;
		}

		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

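/* Reclaim skbs the device has finished transmitting. If any were reclaimed,
 * requeue the send worker, since virtqueue space just opened up.
 */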
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct sk_buff *skb;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((skb = virtqueue_get_buf(vq, &len)) != NULL) {
			consume_skb(skb);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk_error_report(sk);
}

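/* Read the guest CID from the device's config space. The field is
 * little-endian, hence the le64_to_cpu() conversion.
 */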
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(&virtio_transport.transport,
						virtio_vsock_reset_sock);
		break;
	}
}

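/* Process device events (e.g. a transport reset) and hand the event buffers
 * back to the device.
 */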
static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

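/* Virtqueue callbacks run in atomic context, so they only schedule the
 * corresponding work item; the real processing happens in the workqueue.
 */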
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static bool virtio_transport_seqpacket_allow(u32 remote_cid);

static struct virtio_transport virtio_transport = {
	.transport = {
		.module = THIS_MODULE,

		.get_local_cid = virtio_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = virtio_transport_cancel_pkt,

		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
		.seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
		.seqpacket_allow = virtio_transport_seqpacket_allow,
		.seqpacket_has_data = virtio_transport_seqpacket_has_data,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size = virtio_transport_notify_buffer_size,
		.notify_set_rcvlowat = virtio_transport_notify_set_rcvlowat,

		.read_skb = virtio_transport_read_skb,
	},

	.send_pkt = virtio_transport_send_pkt,
};

static bool virtio_transport_seqpacket_allow(u32 remote_cid)
{
	struct virtio_vsock *vsock;
	bool seqpacket_allow;

	seqpacket_allow = false;
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		seqpacket_allow = vsock->seqpacket_allow;
	rcu_read_unlock();

	return seqpacket_allow;
}

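/* Receive worker: pop completed buffers off the RX virtqueue, validate their
 * length, and pass them up to the core. RX is paused while too many replies
 * are pending, and the virtqueue is refilled when it runs low on buffers.
 */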
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct sk_buff *skb;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			skb = virtqueue_get_buf(vq, &len);
			if (!skb)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
				     len > virtio_vsock_skb_len(skb))) {
				kfree_skb(skb);
				continue;
			}

			virtio_vsock_skb_rx_put(skb);
			virtio_transport_deliver_tap_pkt(skb);
			virtio_transport_recv_pkt(&virtio_transport, skb);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

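/* Find the device's virtqueues, read the guest CID, and mark the device
 * ready. The order of the names/callbacks arrays (rx, tx, event) matches
 * the VSOCK_VQ_* indices.
 */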
static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	int ret;

	ret = virtio_find_vqs(vdev, VSOCK_VQ_MAX, vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		return ret;

	virtio_vsock_update_guest_cid(vsock);

	virtio_device_ready(vdev);

	return 0;
}

static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
{
	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	/* virtio_transport_send_pkt() can queue packets once
	 * the_virtio_vsock is set, but they won't be processed until
	 * vsock->tx_run is set to true. We queue vsock->send_pkt_work
	 * when initialization finishes to send those packets queued
	 * earlier.
	 * We don't need to queue the other workers (rx, event) because
	 * as long as we don't fill the queues with empty buffers, the
	 * host can't send us any notification.
	 */
	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

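/* Tear down the virtqueues: reset all connected sockets, stop the workers,
 * reset the device, and free any buffers still owned by the driver.
 */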
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	struct sk_buff *skb;

	/* Reset all connected sockets when the VQs disappear */
	vsock_for_each_connected_socket(&virtio_transport.transport,
					virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call virtio_reset_device().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts, device will not use any
	 * more buffers.
	 */
	virtio_reset_device(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((skb = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		kfree_skb(skb);
	mutex_unlock(&vsock->tx_lock);

	virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);
}

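/* Probe: allocate and initialize the single virtio_vsock instance and
 * publish it via the_virtio_vsock so the transport ops can find it.
 */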
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	skb_queue_head_init(&vsock->send_pkt_queue);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	if (virtio_has_feature(vdev, VIRTIO_VSOCK_F_SEQPACKET))
		vsock->seqpacket_allow = true;

	vdev->priv = vsock;

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

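/* Remove: unpublish the instance, wait for RCU readers to finish, tear down
 * the virtqueues, then flush all work items before freeing the object.
 */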
static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all works before freeing the vsock object to avoid use after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

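/* Suspend/resume: freeze tears down the virtqueues like remove() but keeps
 * the vsock object; restore re-initializes the virtqueues and republishes it.
 */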
#ifdef CONFIG_PM_SLEEP
static int virtio_vsock_freeze(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;

	mutex_lock(&the_virtio_vsock_mutex);

	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	virtio_vsock_vqs_del(vsock);

	mutex_unlock(&the_virtio_vsock_mutex);

	return 0;
}

static int virtio_vsock_restore(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	int ret;

	mutex_lock(&the_virtio_vsock_mutex);

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	ret = virtio_vsock_vqs_init(vsock);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(the_virtio_vsock, vsock);
	virtio_vsock_vqs_start(vsock);

out:
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */

762
763 static struct virtio_device_id id_table[] = {
764 { VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
765 { 0 },
766 };
767
768 static unsigned int features[] = {
769 VIRTIO_VSOCK_F_SEQPACKET
770 };
771
772 static struct virtio_driver virtio_vsock_driver = {
773 .feature_table = features,
774 .feature_table_size = ARRAY_SIZE(features),
775 .driver.name = KBUILD_MODNAME,
776 .driver.owner = THIS_MODULE,
777 .id_table = id_table,
778 .probe = virtio_vsock_probe,
779 .remove = virtio_vsock_remove,
780 #ifdef CONFIG_PM_SLEEP
781 .freeze = virtio_vsock_freeze,
782 .restore = virtio_vsock_restore,
783 #endif
784 };
785
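/* Module init: register the transport with the vsock core before registering
 * the virtio driver, and unwind in the opposite order on failure.
 */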
static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);