/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * pkts.
 */
#define VHOST_TEST_PKT_WEIGHT 256
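/* Both limits are passed to vhost_dev_init() in vhost_test_open() below and
 * enforced by the vhost_exceeds_weight() check in handle_vq().
 */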

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

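	/* Suppress further guest kicks while we drain the ring; notifications
	 * are re-enabled below only once the ring runs empty.
	 */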
	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
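		/* Descriptor consumed: report it in the used ring with a
		 * zero length and signal the guest's call eventfd.
		 */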
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);

	f->private_data = n;

	return 0;
}

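/* Clearing private_data under the vq mutex is what stops a virtqueue:
 * handle_vq() checks it on entry and returns immediately once it is NULL.
 * Work already queued is drained by the flush helpers below.
 */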
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n);
	return 0;
}

static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Start or stop the vq: a non-NULL private_data enables
		 * processing in handle_vq(). */
		oldpriv = vq->private_data;
		vq->private_data = priv;

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_umem *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;
	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		printk(KERN_ERR "1\n");
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		printk(KERN_ERR "2\n");
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		printk(KERN_ERR "3\n");
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}
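
/* Rough userspace usage sketch (illustrative only; the in-tree tools/virtio
 * test program is the reference client, and the exact sequence below is an
 * assumption rather than something this driver mandates):
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	ioctl(fd, VHOST_SET_MEM_TABLE, mem);       // struct vhost_memory
 *	ioctl(fd, VHOST_SET_VRING_NUM, &state);    // struct vhost_vring_state
 *	ioctl(fd, VHOST_SET_VRING_ADDR, &addr);    // struct vhost_vring_addr
 *	ioctl(fd, VHOST_SET_VRING_KICK, &kick);    // struct vhost_vring_file
 *	ioctl(fd, VHOST_SET_VRING_CALL, &call);    // struct vhost_vring_file
 *	int one = 1;
 *	ioctl(fd, VHOST_TEST_RUN, &one);           // enable the test vq
 *
 * The generic VHOST_* ioctls fall through to vhost_dev_ioctl() and
 * vhost_vring_ioctl() in the default case above; only VHOST_TEST_RUN is
 * specific to this device.
 */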

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner = THIS_MODULE,
	.release = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_test_compat_ioctl,
#endif
	.open = vhost_test_open,
	.llseek = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");