// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * Some of the code is taken from Gerd Hoffmann <kraxel@redhat.com>'s
 * early virtio-vsock proof-of-concept bits.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_vsock.h>
#include <net/sock.h>
#include <linux/mutex.h>
#include <net/af_vsock.h>

static struct workqueue_struct *virtio_vsock_workqueue;
static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */

struct virtio_vsock {
	struct virtio_device *vdev;
	struct virtqueue *vqs[VSOCK_VQ_MAX];

	/* Virtqueue processing is deferred to a workqueue */
	struct work_struct tx_work;
	struct work_struct rx_work;
	struct work_struct event_work;

	/* The following fields are protected by tx_lock. vqs[VSOCK_VQ_TX]
	 * must be accessed with tx_lock held.
	 */
	struct mutex tx_lock;
	bool tx_run;

	struct work_struct send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;

	atomic_t queued_replies;

	/* The following fields are protected by rx_lock. vqs[VSOCK_VQ_RX]
	 * must be accessed with rx_lock held.
	 */
	struct mutex rx_lock;
	bool rx_run;
	int rx_buf_nr;
	int rx_buf_max_nr;

	/* The following fields are protected by event_lock.
	 * vqs[VSOCK_VQ_EVENT] must be accessed with event_lock held.
	 */
	struct mutex event_lock;
	bool event_run;
	struct virtio_vsock_event event_list[8];

	u32 guest_cid;
};

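/* Return the CID assigned to this guest by the host, or VMADDR_CID_ANY if
 * the virtio-vsock device has not been probed yet (or has been removed).
 */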
static u32 virtio_transport_get_local_cid(void)
{
	struct virtio_vsock *vsock;
	u32 ret;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = VMADDR_CID_ANY;
		goto out_rcu;
	}

	ret = vsock->guest_cid;
out_rcu:
	rcu_read_unlock();
	return ret;
}

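/* Worker that drains send_pkt_list into the TX virtqueue. Packets that do
 * not fit are put back at the head of the list and retried when the device
 * signals TX completions (see virtio_transport_tx_work()).
 */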
static void
virtio_transport_send_pkt_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, send_pkt_work);
	struct virtqueue *vq;
	bool added = false;
	bool restart_rx = false;

	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	vq = vsock->vqs[VSOCK_VQ_TX];

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct scatterlist hdr, buf, *sgs[2];
		int ret, in_sg = 0, out_sg = 0;
		bool reply;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		virtio_transport_deliver_tap_pkt(pkt);

		reply = pkt->reply;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[out_sg++] = &hdr;
		if (pkt->buf) {
			sg_init_one(&buf, pkt->buf, pkt->len);
			sgs[out_sg++] = &buf;
		}

		ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL);
		/* Usually this means that there is no more space available in
		 * the vq
		 */
		if (ret < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (reply) {
			struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we now have resources to resume rx processing? */
			if (val + 1 == virtqueue_get_vring_size(rx_vq))
				restart_rx = true;
		}

		added = true;
	}

	if (added)
		virtqueue_kick(vq);

out:
	mutex_unlock(&vsock->tx_lock);

	if (restart_rx)
		queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

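/* Queue a packet for transmission and kick the send worker.
 *
 * This is the transport's send_pkt callback; the common code in
 * net/vmw_vsock/virtio_transport_common.c is the caller, roughly
 * (a sketch only, not a verbatim call site):
 *
 *	pkt = virtio_transport_alloc_pkt(&info, payload_len,
 *					 src_cid, src_port,
 *					 dst_cid, dst_port);
 *	if (pkt)
 *		ret = t_ops->send_pkt(pkt);
 *
 * Returns the packet length on success, or -ENODEV if the device is not
 * available or the packet is addressed to the guest's own CID.
 */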
static int
virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct virtio_vsock *vsock;
	int len = pkt->len;

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid) {
		virtio_transport_free_pkt(pkt);
		len = -ENODEV;
		goto out_rcu;
	}

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);

out_rcu:
	rcu_read_unlock();
	return len;
}

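/* Drop all queued packets belonging to @vsk. If any of them were replies,
 * the reply budget is credited back, and rx processing is restarted if it
 * had stalled on a full budget.
 */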
static int
virtio_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct virtio_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0, ret;
	LIST_HEAD(freeme);

	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (!vsock) {
		ret = -ENODEV;
		goto out_rcu;
	}

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
		    new_cnt < virtqueue_get_vring_size(rx_vq))
			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
	}

	ret = 0;

out_rcu:
	rcu_read_unlock();
	return ret;
}

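/* Fill the RX virtqueue with empty packet buffers for the device to write
 * received packets into. Called with rx_lock held.
 */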
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
	struct virtio_vsock_pkt *pkt;
	struct scatterlist hdr, buf, *sgs[2];
	struct virtqueue *vq;
	int ret;

	vq = vsock->vqs[VSOCK_VQ_RX];

	do {
		pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
		if (!pkt)
			break;

		pkt->buf = kmalloc(buf_len, GFP_KERNEL);
		if (!pkt->buf) {
			virtio_transport_free_pkt(pkt);
			break;
		}

		pkt->buf_len = buf_len;
		pkt->len = buf_len;

		sg_init_one(&hdr, &pkt->hdr, sizeof(pkt->hdr));
		sgs[0] = &hdr;

		sg_init_one(&buf, pkt->buf, buf_len);
		sgs[1] = &buf;
		ret = virtqueue_add_sgs(vq, sgs, 0, 2, pkt, GFP_KERNEL);
		if (ret) {
			virtio_transport_free_pkt(pkt);
			break;
		}
		vsock->rx_buf_nr++;
	} while (vq->num_free);
	if (vsock->rx_buf_nr > vsock->rx_buf_max_nr)
		vsock->rx_buf_max_nr = vsock->rx_buf_nr;
	virtqueue_kick(vq);
}

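/* Worker that reclaims completed TX buffers from the device. Freeing them
 * may make room for more packets, so the send worker is requeued if
 * anything was reclaimed.
 */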
static void virtio_transport_tx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, tx_work);
	struct virtqueue *vq;
	bool added = false;

	vq = vsock->vqs[VSOCK_VQ_TX];
	mutex_lock(&vsock->tx_lock);

	if (!vsock->tx_run)
		goto out;

	do {
		struct virtio_vsock_pkt *pkt;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((pkt = virtqueue_get_buf(vq, &len)) != NULL) {
			virtio_transport_free_pkt(pkt);
			added = true;
		}
	} while (!virtqueue_enable_cb(vq));

out:
	mutex_unlock(&vsock->tx_lock);

	if (added)
		queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}

/* Is there space left for replies to rx packets? */
static bool virtio_transport_more_replies(struct virtio_vsock *vsock)
{
	struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < virtqueue_get_vring_size(vq);
}

/* event_lock must be held */
static int virtio_vsock_event_fill_one(struct virtio_vsock *vsock,
				       struct virtio_vsock_event *event)
{
	struct scatterlist sg;
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	sg_init_one(&sg, event, sizeof(*event));

	return virtqueue_add_inbuf(vq, &sg, 1, event, GFP_KERNEL);
}

/* event_lock must be held */
static void virtio_vsock_event_fill(struct virtio_vsock *vsock)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(vsock->event_list); i++) {
		struct virtio_vsock_event *event = &vsock->event_list[i];

		virtio_vsock_event_fill_one(vsock, event);
	}

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
}

static void virtio_vsock_reset_sock(struct sock *sk)
{
	/* vmci_transport.c doesn't take sk_lock here either. At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	sk->sk_state = TCP_CLOSE;
	sk->sk_err = ECONNRESET;
	sk->sk_error_report(sk);
}

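/* Re-read the guest CID from the device config space. The host may assign
 * a new CID (e.g. after migration), signalled by a transport reset event.
 */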
static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock)
{
	struct virtio_device *vdev = vsock->vdev;
	__le64 guest_cid;

	vdev->config->get(vdev, offsetof(struct virtio_vsock_config, guest_cid),
			  &guest_cid, sizeof(guest_cid));
	vsock->guest_cid = le64_to_cpu(guest_cid);
}

/* event_lock must be held */
static void virtio_vsock_event_handle(struct virtio_vsock *vsock,
				      struct virtio_vsock_event *event)
{
	switch (le32_to_cpu(event->id)) {
	case VIRTIO_VSOCK_EVENT_TRANSPORT_RESET:
		virtio_vsock_update_guest_cid(vsock);
		vsock_for_each_connected_socket(virtio_vsock_reset_sock);
		break;
	}
}

static void virtio_transport_event_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, event_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_EVENT];

	mutex_lock(&vsock->event_lock);

	if (!vsock->event_run)
		goto out;

	do {
		struct virtio_vsock_event *event;
		unsigned int len;

		virtqueue_disable_cb(vq);
		while ((event = virtqueue_get_buf(vq, &len)) != NULL) {
			if (len == sizeof(*event))
				virtio_vsock_event_handle(vsock, event);

			virtio_vsock_event_fill_one(vsock, event);
		}
	} while (!virtqueue_enable_cb(vq));

	virtqueue_kick(vsock->vqs[VSOCK_VQ_EVENT]);
out:
	mutex_unlock(&vsock->event_lock);
}

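/* Virtqueue callbacks. These run in interrupt context, so they only defer
 * the real work to the workqueue. vdev->priv is NULL while the device is
 * being probed or removed, in which case the interrupt is ignored.
 */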
static void virtio_vsock_event_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->event_work);
}

static void virtio_vsock_tx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->tx_work);
}

static void virtio_vsock_rx_done(struct virtqueue *vq)
{
	struct virtio_vsock *vsock = vq->vdev->priv;

	if (!vsock)
		return;
	queue_work(virtio_vsock_workqueue, &vsock->rx_work);
}

static struct virtio_transport virtio_transport = {
	.transport = {
		.module                   = THIS_MODULE,

		.get_local_cid            = virtio_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,
		.cancel_pkt               = virtio_transport_cancel_pkt,

		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
		.notify_buffer_size       = virtio_transport_notify_buffer_size,
	},

	.send_pkt = virtio_transport_send_pkt,
};

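/* Worker that receives packets from the RX virtqueue and hands them to the
 * common transport code. Processing stops, with callbacks left disabled,
 * when the reply budget is exhausted; it is restarted once the send worker
 * drains queued replies. Used buffers are replenished when fewer than half
 * of the high-water mark remain.
 */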
static void virtio_transport_rx_work(struct work_struct *work)
{
	struct virtio_vsock *vsock =
		container_of(work, struct virtio_vsock, rx_work);
	struct virtqueue *vq;

	vq = vsock->vqs[VSOCK_VQ_RX];

	mutex_lock(&vsock->rx_lock);

	if (!vsock->rx_run)
		goto out;

	do {
		virtqueue_disable_cb(vq);
		for (;;) {
			struct virtio_vsock_pkt *pkt;
			unsigned int len;

			if (!virtio_transport_more_replies(vsock)) {
				/* Stop rx until the device processes already
				 * pending replies. Leave rx virtqueue
				 * callbacks disabled.
				 */
				goto out;
			}

			pkt = virtqueue_get_buf(vq, &len);
			if (!pkt)
				break;

			vsock->rx_buf_nr--;

			/* Drop short/long packets */
			if (unlikely(len < sizeof(pkt->hdr) ||
				     len > sizeof(pkt->hdr) + pkt->len)) {
				virtio_transport_free_pkt(pkt);
				continue;
			}

			pkt->len = len - sizeof(pkt->hdr);
			virtio_transport_deliver_tap_pkt(pkt);
			virtio_transport_recv_pkt(&virtio_transport, pkt);
		}
	} while (!virtqueue_enable_cb(vq));

out:
	if (vsock->rx_buf_nr < vsock->rx_buf_max_nr / 2)
		virtio_vsock_rx_fill(vsock);
	mutex_unlock(&vsock->rx_lock);
}

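/* Probe a new virtio-vsock device. Note the ordering: vdev->priv and
 * the_virtio_vsock are assigned only after the virtqueues and workers are
 * fully set up, so interrupt callbacks and senders never observe a
 * half-initialized device.
 */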
static int virtio_vsock_probe(struct virtio_device *vdev)
{
	vq_callback_t *callbacks[] = {
		virtio_vsock_rx_done,
		virtio_vsock_tx_done,
		virtio_vsock_event_done,
	};
	static const char * const names[] = {
		"rx",
		"tx",
		"event",
	};
	struct virtio_vsock *vsock = NULL;
	int ret;

	ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
	if (ret)
		return ret;

	/* Only one virtio-vsock device per guest is supported */
	if (rcu_dereference_protected(the_virtio_vsock,
				      lockdep_is_held(&the_virtio_vsock_mutex))) {
		ret = -EBUSY;
		goto out;
	}

	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL);
	if (!vsock) {
		ret = -ENOMEM;
		goto out;
	}

	vsock->vdev = vdev;

	ret = virtio_find_vqs(vsock->vdev, VSOCK_VQ_MAX,
			      vsock->vqs, callbacks, names,
			      NULL);
	if (ret < 0)
		goto out;

	virtio_vsock_update_guest_cid(vsock);

	vsock->rx_buf_nr = 0;
	vsock->rx_buf_max_nr = 0;
	atomic_set(&vsock->queued_replies, 0);

	mutex_init(&vsock->tx_lock);
	mutex_init(&vsock->rx_lock);
	mutex_init(&vsock->event_lock);
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	INIT_WORK(&vsock->rx_work, virtio_transport_rx_work);
	INIT_WORK(&vsock->tx_work, virtio_transport_tx_work);
	INIT_WORK(&vsock->event_work, virtio_transport_event_work);
	INIT_WORK(&vsock->send_pkt_work, virtio_transport_send_pkt_work);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = true;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->rx_lock);
	virtio_vsock_rx_fill(vsock);
	vsock->rx_run = true;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->event_lock);
	virtio_vsock_event_fill(vsock);
	vsock->event_run = true;
	mutex_unlock(&vsock->event_lock);

	vdev->priv = vsock;
	rcu_assign_pointer(the_virtio_vsock, vsock);

	mutex_unlock(&the_virtio_vsock_mutex);
	return 0;

out:
	kfree(vsock);
	mutex_unlock(&the_virtio_vsock_mutex);
	return ret;
}

static void virtio_vsock_remove(struct virtio_device *vdev)
{
	struct virtio_vsock *vsock = vdev->priv;
	struct virtio_vsock_pkt *pkt;

	mutex_lock(&the_virtio_vsock_mutex);

	vdev->priv = NULL;
	rcu_assign_pointer(the_virtio_vsock, NULL);
	synchronize_rcu();

	/* Reset all connected sockets when the device disappears */
	vsock_for_each_connected_socket(virtio_vsock_reset_sock);

	/* Stop all work handlers to make sure no one is accessing the device,
	 * so we can safely call vdev->config->reset().
	 */
	mutex_lock(&vsock->rx_lock);
	vsock->rx_run = false;
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	vsock->tx_run = false;
	mutex_unlock(&vsock->tx_lock);

	mutex_lock(&vsock->event_lock);
	vsock->event_run = false;
	mutex_unlock(&vsock->event_lock);

	/* Flush all device writes and interrupts; the device will not use any
	 * more buffers.
	 */
	vdev->config->reset(vdev);

	mutex_lock(&vsock->rx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_RX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->rx_lock);

	mutex_lock(&vsock->tx_lock);
	while ((pkt = virtqueue_detach_unused_buf(vsock->vqs[VSOCK_VQ_TX])))
		virtio_transport_free_pkt(pkt);
	mutex_unlock(&vsock->tx_lock);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	/* Delete virtqueues and flush outstanding callbacks if any */
	vdev->config->del_vqs(vdev);

	/* Other works can be queued before 'config->del_vqs()', so we flush
	 * all pending works before freeing the vsock object to avoid use
	 * after free.
	 */
	flush_work(&vsock->rx_work);
	flush_work(&vsock->tx_work);
	flush_work(&vsock->event_work);
	flush_work(&vsock->send_pkt_work);

	mutex_unlock(&the_virtio_vsock_mutex);

	kfree(vsock);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_VSOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
};

static struct virtio_driver virtio_vsock_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtio_vsock_probe,
	.remove = virtio_vsock_remove,
};

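/* Module init. The workqueue and the vsock core registration must be in
 * place before register_virtio_driver(), since probe can run as soon as
 * the driver is registered; errors unwind in reverse order.
 */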
static int __init virtio_vsock_init(void)
{
	int ret;

	virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
	if (!virtio_vsock_workqueue)
		return -ENOMEM;

	ret = vsock_core_register(&virtio_transport.transport,
				  VSOCK_TRANSPORT_F_G2H);
	if (ret)
		goto out_wq;

	ret = register_virtio_driver(&virtio_vsock_driver);
	if (ret)
		goto out_vci;

	return 0;

out_vci:
	vsock_core_unregister(&virtio_transport.transport);
out_wq:
	destroy_workqueue(virtio_vsock_workqueue);
	return ret;
}

static void __exit virtio_vsock_exit(void)
{
	unregister_virtio_driver(&virtio_vsock_driver);
	vsock_core_unregister(&virtio_transport.transport);
	destroy_workqueue(virtio_vsock_workqueue);
}

module_init(virtio_vsock_init);
module_exit(virtio_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("virtio transport for vsock");
MODULE_DEVICE_TABLE(virtio, id_table);