1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /* Copyright (C) 2009 Red Hat, Inc.
3*4882a593Smuzhiyun * Author: Michael S. Tsirkin <mst@redhat.com>
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * test virtio server in host kernel.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/compat.h>
9*4882a593Smuzhiyun #include <linux/eventfd.h>
10*4882a593Smuzhiyun #include <linux/vhost.h>
11*4882a593Smuzhiyun #include <linux/miscdevice.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/mutex.h>
14*4882a593Smuzhiyun #include <linux/workqueue.h>
15*4882a593Smuzhiyun #include <linux/file.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include "test.h"
19*4882a593Smuzhiyun #include "vhost.h"
20*4882a593Smuzhiyun
/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * pkts.
 */
#define VHOST_TEST_PKT_WEIGHT 256
30*4882a593Smuzhiyun
enum {
	VHOST_TEST_VQ = 0,	/* index of the single test virtqueue */
	VHOST_TEST_VQ_MAX = 1,	/* total number of virtqueues */
};
35*4882a593Smuzhiyun
/* Per-open-file state: the generic vhost device plus its virtqueue(s). */
struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};
40*4882a593Smuzhiyun
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	/* The backend pointer doubles as a "device running" flag; it is
	 * NULL unless userspace enabled the vq (see vhost_test_run). */
	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	/* Drain available descriptors, completing each with zero bytes
	 * written (the test device just consumes TX buffers). */
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			/* Re-enable notify; if more buffers raced in while
			 * notify was off, disable again and keep going. */
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		/* The test protocol is TX-only: writable (in) descriptors
		 * are a guest bug. */
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, int %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		/* Requeue ourselves once the byte weight is exceeded so
		 * other vqs/works are not starved. */
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}
95*4882a593Smuzhiyun
handle_vq_kick(struct vhost_work * work)96*4882a593Smuzhiyun static void handle_vq_kick(struct vhost_work *work)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
99*4882a593Smuzhiyun poll.work);
100*4882a593Smuzhiyun struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun handle_vq(n);
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
vhost_test_open(struct inode * inode,struct file * f)105*4882a593Smuzhiyun static int vhost_test_open(struct inode *inode, struct file *f)
106*4882a593Smuzhiyun {
107*4882a593Smuzhiyun struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
108*4882a593Smuzhiyun struct vhost_dev *dev;
109*4882a593Smuzhiyun struct vhost_virtqueue **vqs;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun if (!n)
112*4882a593Smuzhiyun return -ENOMEM;
113*4882a593Smuzhiyun vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
114*4882a593Smuzhiyun if (!vqs) {
115*4882a593Smuzhiyun kfree(n);
116*4882a593Smuzhiyun return -ENOMEM;
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun dev = &n->dev;
120*4882a593Smuzhiyun vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
121*4882a593Smuzhiyun n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
122*4882a593Smuzhiyun vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
123*4882a593Smuzhiyun VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun f->private_data = n;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun return 0;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
vhost_test_stop_vq(struct vhost_test * n,struct vhost_virtqueue * vq)130*4882a593Smuzhiyun static void *vhost_test_stop_vq(struct vhost_test *n,
131*4882a593Smuzhiyun struct vhost_virtqueue *vq)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun void *private;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun mutex_lock(&vq->mutex);
136*4882a593Smuzhiyun private = vhost_vq_get_backend(vq);
137*4882a593Smuzhiyun vhost_vq_set_backend(vq, NULL);
138*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
139*4882a593Smuzhiyun return private;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
vhost_test_stop(struct vhost_test * n,void ** privatep)142*4882a593Smuzhiyun static void vhost_test_stop(struct vhost_test *n, void **privatep)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun *privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
vhost_test_flush_vq(struct vhost_test * n,int index)147*4882a593Smuzhiyun static void vhost_test_flush_vq(struct vhost_test *n, int index)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun vhost_poll_flush(&n->vqs[index].poll);
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun
vhost_test_flush(struct vhost_test * n)152*4882a593Smuzhiyun static void vhost_test_flush(struct vhost_test *n)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun vhost_test_flush_vq(n, VHOST_TEST_VQ);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
vhost_test_release(struct inode * inode,struct file * f)157*4882a593Smuzhiyun static int vhost_test_release(struct inode *inode, struct file *f)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun struct vhost_test *n = f->private_data;
160*4882a593Smuzhiyun void *private;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun vhost_test_stop(n, &private);
163*4882a593Smuzhiyun vhost_test_flush(n);
164*4882a593Smuzhiyun vhost_dev_stop(&n->dev);
165*4882a593Smuzhiyun vhost_dev_cleanup(&n->dev);
166*4882a593Smuzhiyun /* We do an extra flush before freeing memory,
167*4882a593Smuzhiyun * since jobs can re-queue themselves. */
168*4882a593Smuzhiyun vhost_test_flush(n);
169*4882a593Smuzhiyun kfree(n);
170*4882a593Smuzhiyun return 0;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
vhost_test_run(struct vhost_test * n,int test)173*4882a593Smuzhiyun static long vhost_test_run(struct vhost_test *n, int test)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun void *priv, *oldpriv;
176*4882a593Smuzhiyun struct vhost_virtqueue *vq;
177*4882a593Smuzhiyun int r, index;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun if (test < 0 || test > 1)
180*4882a593Smuzhiyun return -EINVAL;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun mutex_lock(&n->dev.mutex);
183*4882a593Smuzhiyun r = vhost_dev_check_owner(&n->dev);
184*4882a593Smuzhiyun if (r)
185*4882a593Smuzhiyun goto err;
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun for (index = 0; index < n->dev.nvqs; ++index) {
188*4882a593Smuzhiyun /* Verify that ring has been setup correctly. */
189*4882a593Smuzhiyun if (!vhost_vq_access_ok(&n->vqs[index])) {
190*4882a593Smuzhiyun r = -EFAULT;
191*4882a593Smuzhiyun goto err;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun for (index = 0; index < n->dev.nvqs; ++index) {
196*4882a593Smuzhiyun vq = n->vqs + index;
197*4882a593Smuzhiyun mutex_lock(&vq->mutex);
198*4882a593Smuzhiyun priv = test ? n : NULL;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun /* start polling new socket */
201*4882a593Smuzhiyun oldpriv = vhost_vq_get_backend(vq);
202*4882a593Smuzhiyun vhost_vq_set_backend(vq, priv);
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun r = vhost_vq_init_access(&n->vqs[index]);
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun if (r)
209*4882a593Smuzhiyun goto err;
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun if (oldpriv) {
212*4882a593Smuzhiyun vhost_test_flush_vq(n, index);
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
217*4882a593Smuzhiyun return 0;
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun err:
220*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
221*4882a593Smuzhiyun return r;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun
vhost_test_reset_owner(struct vhost_test * n)224*4882a593Smuzhiyun static long vhost_test_reset_owner(struct vhost_test *n)
225*4882a593Smuzhiyun {
226*4882a593Smuzhiyun void *priv = NULL;
227*4882a593Smuzhiyun long err;
228*4882a593Smuzhiyun struct vhost_iotlb *umem;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun mutex_lock(&n->dev.mutex);
231*4882a593Smuzhiyun err = vhost_dev_check_owner(&n->dev);
232*4882a593Smuzhiyun if (err)
233*4882a593Smuzhiyun goto done;
234*4882a593Smuzhiyun umem = vhost_dev_reset_owner_prepare();
235*4882a593Smuzhiyun if (!umem) {
236*4882a593Smuzhiyun err = -ENOMEM;
237*4882a593Smuzhiyun goto done;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun vhost_test_stop(n, &priv);
240*4882a593Smuzhiyun vhost_test_flush(n);
241*4882a593Smuzhiyun vhost_dev_stop(&n->dev);
242*4882a593Smuzhiyun vhost_dev_reset_owner(&n->dev, umem);
243*4882a593Smuzhiyun done:
244*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
245*4882a593Smuzhiyun return err;
246*4882a593Smuzhiyun }
247*4882a593Smuzhiyun
vhost_test_set_features(struct vhost_test * n,u64 features)248*4882a593Smuzhiyun static int vhost_test_set_features(struct vhost_test *n, u64 features)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun struct vhost_virtqueue *vq;
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun mutex_lock(&n->dev.mutex);
253*4882a593Smuzhiyun if ((features & (1 << VHOST_F_LOG_ALL)) &&
254*4882a593Smuzhiyun !vhost_log_access_ok(&n->dev)) {
255*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
256*4882a593Smuzhiyun return -EFAULT;
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun vq = &n->vqs[VHOST_TEST_VQ];
259*4882a593Smuzhiyun mutex_lock(&vq->mutex);
260*4882a593Smuzhiyun vq->acked_features = features;
261*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
262*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
263*4882a593Smuzhiyun return 0;
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
vhost_test_set_backend(struct vhost_test * n,unsigned index,int fd)266*4882a593Smuzhiyun static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun static void *backend;
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun const bool enable = fd != -1;
271*4882a593Smuzhiyun struct vhost_virtqueue *vq;
272*4882a593Smuzhiyun int r;
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun mutex_lock(&n->dev.mutex);
275*4882a593Smuzhiyun r = vhost_dev_check_owner(&n->dev);
276*4882a593Smuzhiyun if (r)
277*4882a593Smuzhiyun goto err;
278*4882a593Smuzhiyun
279*4882a593Smuzhiyun if (index >= VHOST_TEST_VQ_MAX) {
280*4882a593Smuzhiyun r = -ENOBUFS;
281*4882a593Smuzhiyun goto err;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun vq = &n->vqs[index];
284*4882a593Smuzhiyun mutex_lock(&vq->mutex);
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun /* Verify that ring has been setup correctly. */
287*4882a593Smuzhiyun if (!vhost_vq_access_ok(vq)) {
288*4882a593Smuzhiyun r = -EFAULT;
289*4882a593Smuzhiyun goto err_vq;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun if (!enable) {
292*4882a593Smuzhiyun vhost_poll_stop(&vq->poll);
293*4882a593Smuzhiyun backend = vhost_vq_get_backend(vq);
294*4882a593Smuzhiyun vhost_vq_set_backend(vq, NULL);
295*4882a593Smuzhiyun } else {
296*4882a593Smuzhiyun vhost_vq_set_backend(vq, backend);
297*4882a593Smuzhiyun r = vhost_vq_init_access(vq);
298*4882a593Smuzhiyun if (r == 0)
299*4882a593Smuzhiyun r = vhost_poll_start(&vq->poll, vq->kick);
300*4882a593Smuzhiyun }
301*4882a593Smuzhiyun
302*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun if (enable) {
305*4882a593Smuzhiyun vhost_test_flush_vq(n, index);
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
309*4882a593Smuzhiyun return 0;
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun err_vq:
312*4882a593Smuzhiyun mutex_unlock(&vq->mutex);
313*4882a593Smuzhiyun err:
314*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
315*4882a593Smuzhiyun return r;
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun
vhost_test_ioctl(struct file * f,unsigned int ioctl,unsigned long arg)318*4882a593Smuzhiyun static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
319*4882a593Smuzhiyun unsigned long arg)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun struct vhost_vring_file backend;
322*4882a593Smuzhiyun struct vhost_test *n = f->private_data;
323*4882a593Smuzhiyun void __user *argp = (void __user *)arg;
324*4882a593Smuzhiyun u64 __user *featurep = argp;
325*4882a593Smuzhiyun int test;
326*4882a593Smuzhiyun u64 features;
327*4882a593Smuzhiyun int r;
328*4882a593Smuzhiyun switch (ioctl) {
329*4882a593Smuzhiyun case VHOST_TEST_RUN:
330*4882a593Smuzhiyun if (copy_from_user(&test, argp, sizeof test))
331*4882a593Smuzhiyun return -EFAULT;
332*4882a593Smuzhiyun return vhost_test_run(n, test);
333*4882a593Smuzhiyun case VHOST_TEST_SET_BACKEND:
334*4882a593Smuzhiyun if (copy_from_user(&backend, argp, sizeof backend))
335*4882a593Smuzhiyun return -EFAULT;
336*4882a593Smuzhiyun return vhost_test_set_backend(n, backend.index, backend.fd);
337*4882a593Smuzhiyun case VHOST_GET_FEATURES:
338*4882a593Smuzhiyun features = VHOST_FEATURES;
339*4882a593Smuzhiyun if (copy_to_user(featurep, &features, sizeof features))
340*4882a593Smuzhiyun return -EFAULT;
341*4882a593Smuzhiyun return 0;
342*4882a593Smuzhiyun case VHOST_SET_FEATURES:
343*4882a593Smuzhiyun printk(KERN_ERR "1\n");
344*4882a593Smuzhiyun if (copy_from_user(&features, featurep, sizeof features))
345*4882a593Smuzhiyun return -EFAULT;
346*4882a593Smuzhiyun printk(KERN_ERR "2\n");
347*4882a593Smuzhiyun if (features & ~VHOST_FEATURES)
348*4882a593Smuzhiyun return -EOPNOTSUPP;
349*4882a593Smuzhiyun printk(KERN_ERR "3\n");
350*4882a593Smuzhiyun return vhost_test_set_features(n, features);
351*4882a593Smuzhiyun case VHOST_RESET_OWNER:
352*4882a593Smuzhiyun return vhost_test_reset_owner(n);
353*4882a593Smuzhiyun default:
354*4882a593Smuzhiyun mutex_lock(&n->dev.mutex);
355*4882a593Smuzhiyun r = vhost_dev_ioctl(&n->dev, ioctl, argp);
356*4882a593Smuzhiyun if (r == -ENOIOCTLCMD)
357*4882a593Smuzhiyun r = vhost_vring_ioctl(&n->dev, ioctl, argp);
358*4882a593Smuzhiyun vhost_test_flush(n);
359*4882a593Smuzhiyun mutex_unlock(&n->dev.mutex);
360*4882a593Smuzhiyun return r;
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun
/* File operations for the /dev/vhost-test character device. */
static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_test_open,
	.llseek		= noop_llseek,
};
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun static struct miscdevice vhost_test_misc = {
374*4882a593Smuzhiyun MISC_DYNAMIC_MINOR,
375*4882a593Smuzhiyun "vhost-test",
376*4882a593Smuzhiyun &vhost_test_fops,
377*4882a593Smuzhiyun };
378*4882a593Smuzhiyun module_misc_device(vhost_test_misc);
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun MODULE_VERSION("0.0.1");
381*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
382*4882a593Smuzhiyun MODULE_AUTHOR("Michael S. Tsirkin");
383*4882a593Smuzhiyun MODULE_DESCRIPTION("Host kernel side for virtio simulator");
384