/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */
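
/* Rough usage sketch (an assumed sequence; the real harness lives in
 * tools/virtio/virtio_test.c in the kernel tree):
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	int one = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	...VHOST_SET_MEM_TABLE, VHOST_SET_VRING_NUM/ADDR/BASE and
 *	   VHOST_SET_VRING_KICK/CALL wire up the ring and eventfds...
 *	ioctl(fd, VHOST_TEST_RUN, &one);
 */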

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to always be run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

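	/* Suppress guest kicks while we actively poll the ring;
	 * vhost_enable_notify() below re-arms them once the ring drains. */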
	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			       out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
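		/* Consume the buffer without copying any data: report a
		 * zero-length used entry for it and signal the guest. */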
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
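		/* Requeue ourselves and yield once VHOST_TEST_WEIGHT bytes
		 * have been handled, so this vq cannot starve other work. */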
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

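	/* The vhost core takes an array of vq pointers: wire up our single
	 * test vq and its kick handler before initializing the device. */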
	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);

	f->private_data = n;

	return 0;
}

static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

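	/* Clear the vq's private_data so the handler becomes a nop, then
	 * flush any work that was already queued. */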
	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n);
	return 0;
}

static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that the ring has been set up correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

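	/* test != 0 enables the vq by giving it a non-NULL private_data;
	 * test == 0 disables it, making the kick handler bail out early. */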
	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Start (or stop) polling the vq. */
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
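		/* Delegate generic vhost ioctls (owner, memory table, vring
		 * setup) to the vhost core, then to the vring layer. */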
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}


#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "vhost-test",
	.fops  = &vhost_test_fops,
};
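/* Registered at module load; the device node appears as /dev/vhost-test
 * with a dynamically assigned minor. */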
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");