/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
#include <linux/hashtable.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_VSOCK_WEIGHT 0x80000
/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * small pkts.
 */
#define VHOST_VSOCK_PKT_WEIGHT 256

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);

struct vhost_vsock {
	struct vhost_dev dev;
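	/* vqs[] is indexed by VSOCK_VQ_RX/VSOCK_VQ_TX.  The names follow the
	 * guest's point of view: the host transmits on VSOCK_VQ_RX (the
	 * guest's receive queue) and receives on VSOCK_VQ_TX.
	 */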
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
	struct hlist_node hash;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

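/* The host side always uses the well-known CID 2 (VMADDR_CID_HOST). */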
static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

/* Callers that dereference the return value must hold vhost_vsock_lock or the
 * RCU read lock.
 */
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid)
			return vsock;
	}

	return NULL;
}

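/* Drain vsock->send_pkt_list into the guest's rx virtqueue.  Runs in vhost
 * worker context; vq->mutex serializes against vhost_vsock_start()/stop().
 */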
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

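		/* vhost_get_vq_desc() returns a negative value on error and
		 * vq->num when the ring has no available buffers.
		 */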
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Tx is throttled once queued_replies reaches
			 * tx_vq->num (see vhost_vsock_more_replies()).  If
			 * this completion drops us back below the limit,
			 * resume tx processing.
			 */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

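/* Queue a packet for delivery to the guest and kick the rx worker.  Returns
 * the packet length on success; the packet is consumed even on error.
 */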
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	struct vhost_virtqueue *vq;
	int len = pkt->len;

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		rcu_read_unlock();
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	vq = &vsock->vqs[VSOCK_VQ_RX];

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);

	rcu_read_unlock();
	return len;
}

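/* Drop any still-queued packets belonging to @vsk, e.g. when the socket is
 * being torn down.  Returns 0 on success, -ENODEV if the peer is gone.
 */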
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	int ret = -ENODEV;
	LIST_HEAD(freeme);

	rcu_read_lock();

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		goto out;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
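		/* If removing these replies dropped queued_replies from
		 * at-or-above tx_vq->num to below it, tx was throttled and
		 * must be kicked to resume.
		 */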
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

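/* Read one guest->host packet (header plus optional payload) out of a tx
 * virtqueue descriptor chain.  Returns NULL on malformed input or allocation
 * failure.
 */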
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

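/* Worker for the tx virtqueue kick: receive guest->host packets. */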
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

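		/* Save the length now: virtio_transport_recv_pkt() may free
		 * pkt before we report the used descriptor below.
		 */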
		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

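/* Worker for the rx virtqueue kick: the guest has refilled rx buffers, so
 * try again to deliver pending host->guest packets.
 */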
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

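/* VHOST_VSOCK_SET_RUNNING(1): bring the virtqueues up by publishing the
 * device pointer as each vq's private_data.
 */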
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->guest_cid = 0; /* no CID assigned yet */

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       VHOST_VSOCK_PKT_WEIGHT, VHOST_VSOCK_WEIGHT);

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	/* If the peer is still valid, no need to reset connection */
	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
		return;

	/* If the close timeout is pending, let it expire.  This avoids races
	 * with the timeout callback.
	 */
	if (vsk->close_work_scheduled)
		return;

	sock_set_flag(sk, SOCK_DONE);
	vsk->peer_shutdown = SHUTDOWN_MASK;
	sk->sk_state = SS_UNCONNECTED;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Wait for other CPUs to finish using vsock */
	synchronize_rcu();

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

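/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID.  CIDs 0-2 are
 * reserved, U32_MAX means "any", and 64-bit CIDs are not yet supported, so
 * all of these are refused below.  A CID change re-hashes the instance
 * under vhost_vsock_lock.
 */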
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	spin_lock_bh(&vhost_vsock_lock);
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock) {
		spin_unlock_bh(&vhost_vsock_lock);
		return -EADDRINUSE;
	}

	if (vsock->guest_cid)
		hash_del_rcu(&vsock->hash);

	vsock->guest_cid = guest_cid;
	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};
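
/* A rough sketch of the userspace side (not part of this driver): the VMM
 * opens /dev/vhost-vsock and configures the device via ioctls, for example:
 *
 *	int fd = open("/dev/vhost-vsock", O_RDWR);
 *	uint64_t cid = 3;	// any CID >= 3, see vhost_vsock_set_cid()
 *	int running = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_VSOCK_SET_GUEST_CID, &cid);
 *	// ... VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for both virtqueues ...
 *	ioctl(fd, VHOST_VSOCK_SET_RUNNING, &running);
 */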

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = vhost_transport_cancel_pkt,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");