// SPDX-License-Identifier: GPL-2.0-only
/*
 * The Virtio 9p transport driver
 *
 * This is a block based transport driver based on the lguest block driver
 * code.
 *
 * Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
 *
 * Based on virtio console driver
 * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"

#define VIRTQUEUE_NUM	128

/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
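/*
 * vp_wq and vp_pinned throttle zero-copy requests: vp_pinned counts the
 * pages currently pinned across all channels, and vp_wq is woken whenever
 * pages are released so that waiters may pin again.
 */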
static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
static atomic_t vp_pinned = ATOMIC_INIT(0);

/**
 * struct virtio_chan - per-instance transport information
 * @inuse: whether the channel is in use
 * @lock: protects multiple elements within this structure
 * @client: client instance
 * @vdev: virtio dev associated with this channel
 * @vq: virtio queue associated with this channel
 * @ring_bufs_avail: flag to indicate there is some space in the ring buffer
 * @vc_wq: wait queue for waiting for a free slot in the ring buffer
 * @p9_max_pages: maximum number of pinned pages
 * @sg: scatter gather list which is used to pack a request (protected?)
 * @tag: name to identify a mount, null terminated
 * @chan_list: linked list of channels
 *
 * We keep all per-channel information in a structure.
 * This structure is allocated within the device's dev->mem space.
 * A pointer to the structure will get put in the transport private.
 *
 */

struct virtio_chan {
	bool inuse;

	spinlock_t lock;

	struct p9_client *client;
	struct virtio_device *vdev;
	struct virtqueue *vq;
	int ring_bufs_avail;
	wait_queue_head_t *vc_wq;
	/* This is a global limit. Since we don't have a global structure,
	 * we place it in each channel.
	 */
	unsigned long p9_max_pages;
	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[VIRTQUEUE_NUM];
	/*
	 * tag name to identify a mount, null terminated
	 */
	char *tag;

	struct list_head chan_list;
};

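/* All registered 9p virtio channels, protected by virtio_9p_lock. */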
static struct list_head virtio_chan_list;

/* How many bytes left in this page. */
static unsigned int rest_of_page(void *data)
{
	return PAGE_SIZE - offset_in_page(data);
}

/**
 * p9_virtio_close - reclaim resources of a channel
 * @client: client instance
 *
 * This reclaims a channel by freeing its resources and
 * resetting its inuse flag.
 *
 */

static void p9_virtio_close(struct p9_client *client)
{
	struct virtio_chan *chan = client->trans;

	mutex_lock(&virtio_9p_lock);
	if (chan)
		chan->inuse = false;
	mutex_unlock(&virtio_9p_lock);
}

/**
 * req_done - callback which signals activity from the server
 * @vq: virtio queue activity was received on
 *
 * This notifies us that the server has triggered some activity
 * on the virtio channel - most likely a response to a request we
 * sent. Figure out which requests now have responses and wake up
 * those threads.
 *
 * Bugs: could do with some additional sanity checking, but appears to work.
 *
 */

static void req_done(struct virtqueue *vq)
{
	struct virtio_chan *chan = vq->vdev->priv;
	unsigned int len;
	struct p9_req_t *req;
	bool need_wakeup = false;
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, ": request done\n");

	spin_lock_irqsave(&chan->lock, flags);
	while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
		if (!chan->ring_bufs_avail) {
			chan->ring_bufs_avail = 1;
			need_wakeup = true;
		}

		if (len) {
			req->rc.size = len;
			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
		}
	}
	spin_unlock_irqrestore(&chan->lock, flags);
	/* Wake up anyone waiting for VirtIO ring space. */
	if (need_wakeup)
		wake_up(chan->vc_wq);
}

/**
 * pack_sg_list - pack a scatter gather list from a linear buffer
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum segment to pack data to
 * @data: data to pack into scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 *
 * sg_lists have multiple segments of various sizes. This will pack
 * arbitrary data into an existing scatter gather list, segmenting the
 * data as necessary within constraints.
 *
 */

static int pack_sg_list(struct scatterlist *sg, int start,
			int limit, char *data, int count)
{
	int s;
	int index = start;

	while (count) {
		s = rest_of_page(data);
		if (s > count)
			s = count;
		BUG_ON(index >= limit);
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_buf(&sg[index++], data, s);
		count -= s;
		data += s;
	}
	if (index - start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}

/* We don't currently allow canceling of virtio requests */
static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

/* Reply won't come, so drop req ref */
static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	p9_req_put(req);
	return 0;
}

/**
 * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
 * this takes a list of pages.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum number of segments in a scatter/gather list
 * @pdata: a list of pages to add into sg.
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @offs: amount of data in the beginning of first page _not_ to pack
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
	       struct page **pdata, int nr_pages, size_t offs, int count)
{
	int i = 0, s;
	int data_off = offs;
	int index = start;

	BUG_ON(nr_pages > (limit - start));
	/*
	 * If the first page doesn't start at a
	 * page boundary, find the offset.
	 */
	while (nr_pages) {
		s = PAGE_SIZE - data_off;
		if (s > count)
			s = count;
		BUG_ON(index >= limit);
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_page(&sg[index++], pdata[i++], s, data_off);
		data_off = 0;
		count -= s;
		nr_pages--;
	}

	if (index - start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}

/**
 * p9_virtio_request - issue a request
 * @client: client instance issuing the request
 * @req: request to be issued
 *
 */

static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
	int err;
	int in, out, out_sgs, in_sgs;
	unsigned long flags;
	struct virtio_chan *chan = client->trans;
	struct scatterlist *sgs[2];

	p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");

	req->status = REQ_STATUS_SENT;
req_retry:
	spin_lock_irqsave(&chan->lock, flags);

	out_sgs = in_sgs = 0;
	/* Handle out VirtIO ring buffers */
	out = pack_sg_list(chan->sg, 0,
			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
	if (out)
		sgs[out_sgs++] = chan->sg;

	in = pack_sg_list(chan->sg, out,
			  VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
	if (in)
		sgs[out_sgs + in_sgs++] = chan->sg + out;

	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
				GFP_ATOMIC);
	if (err < 0) {
		if (err == -ENOSPC) {
			chan->ring_bufs_avail = 0;
			spin_unlock_irqrestore(&chan->lock, flags);
			err = wait_event_killable(*chan->vc_wq,
						  chan->ring_bufs_avail);
			if (err == -ERESTARTSYS)
				return err;

			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
			goto req_retry;
		} else {
			spin_unlock_irqrestore(&chan->lock, flags);
			p9_debug(P9_DEBUG_TRANS,
				 "virtio rpc add_sgs returned failure\n");
			return -EIO;
		}
	}
	virtqueue_kick(chan->vq);
	spin_unlock_irqrestore(&chan->lock, flags);

	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
	return 0;
}

static int p9_get_mapped_pages(struct virtio_chan *chan,
			       struct page ***pages,
			       struct iov_iter *data,
			       int count,
			       size_t *offs,
			       int *need_drop)
{
	int nr_pages;
	int err;

	if (!iov_iter_count(data))
		return 0;

	if (!iov_iter_is_kvec(data)) {
		int n;
		/*
		 * We allow only p9_max_pages pinned. We wait for the
		 * other zc request to finish here.
		 */
		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
			err = wait_event_killable(vp_wq,
						  (atomic_read(&vp_pinned) < chan->p9_max_pages));
			if (err == -ERESTARTSYS)
				return err;
		}
		n = iov_iter_get_pages_alloc(data, pages, count, offs);
		if (n < 0)
			return n;
		*need_drop = 1;
		nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
		atomic_add(nr_pages, &vp_pinned);
		return n;
	} else {
		/* kernel buffer, no need to pin pages */
		int index;
		size_t len;
		void *p;

		/* we'd already checked that it's non-empty */
		while (1) {
			len = iov_iter_single_seg_count(data);
			if (likely(len)) {
				p = data->kvec->iov_base + data->iov_offset;
				break;
			}
			iov_iter_advance(data, 0);
		}
		if (len > count)
			len = count;

		nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
			   (unsigned long)p / PAGE_SIZE;

		*pages = kmalloc_array(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
		if (!*pages)
			return -ENOMEM;

		*need_drop = 0;
		p -= (*offs = offset_in_page(p));
		for (index = 0; index < nr_pages; index++) {
			if (is_vmalloc_addr(p))
				(*pages)[index] = vmalloc_to_page(p);
			else
				(*pages)[index] = kmap_to_page(p);
			p += PAGE_SIZE;
		}
		return len;
	}
}

/**
 * p9_virtio_zc_request - issue a zero copy request
 * @client: client instance issuing the request
 * @req: request to be issued
 * @uidata: user buffer that should be used for zero copy read
 * @uodata: user buffer that should be used for zero copy write
 * @inlen: read buffer size
 * @outlen: write buffer size
 * @in_hdr_len: read header size. This is the size of the response protocol data.
 *
 */
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
		     struct iov_iter *uidata, struct iov_iter *uodata,
		     int inlen, int outlen, int in_hdr_len)
{
	int in, out, err, out_sgs, in_sgs;
	unsigned long flags;
	int in_nr_pages = 0, out_nr_pages = 0;
	struct page **in_pages = NULL, **out_pages = NULL;
	struct virtio_chan *chan = client->trans;
	struct scatterlist *sgs[4];
	size_t offs;
	int need_drop = 0;
	int kicked = 0;

	p9_debug(P9_DEBUG_TRANS, "virtio request\n");

	if (uodata) {
		__le32 sz;
		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
					    outlen, &offs, &need_drop);
		if (n < 0) {
			err = n;
			goto err_out;
		}
		out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
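		/*
		 * Fewer bytes than requested may have been mapped; patch
		 * the count field (the last four bytes of the marshalled
		 * header) so the request only covers the mapped data.
		 */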
		if (n != outlen) {
			__le32 v = cpu_to_le32(n);
			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
			outlen = n;
		}
		/* The size field of the message must include the length of the
		 * header and the length of the data. We didn't actually know
		 * the length of the data until this point so add it in now.
		 */
		sz = cpu_to_le32(req->tc.size + outlen);
		memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
	} else if (uidata) {
		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
					    inlen, &offs, &need_drop);
		if (n < 0) {
			err = n;
			goto err_out;
		}
		in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
		if (n != inlen) {
			__le32 v = cpu_to_le32(n);
			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
			inlen = n;
		}
	}
	req->status = REQ_STATUS_SENT;
req_retry_pinned:
	spin_lock_irqsave(&chan->lock, flags);

	out_sgs = in_sgs = 0;

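	/*
	 * sgs[] holds up to four scatterlists: the transmit header, the
	 * transmit payload pages, the receive header and the receive
	 * payload pages.
	 */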
	/* out data */
	out = pack_sg_list(chan->sg, 0,
			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);

	if (out)
		sgs[out_sgs++] = chan->sg;

	if (out_pages) {
		sgs[out_sgs++] = chan->sg + out;
		out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
				      out_pages, out_nr_pages, offs, outlen);
	}

	/*
	 * Take care of in data
	 * For example TREAD has 11.
	 * 11 is the read/write header = PDU Header(7) + IO Size (4).
	 * Arrange in such a way that the server places the header in the
	 * allocated memory and the payload onto the user buffer.
	 */
	in = pack_sg_list(chan->sg, out,
			  VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
	if (in)
		sgs[out_sgs + in_sgs++] = chan->sg + out;

	if (in_pages) {
		sgs[out_sgs + in_sgs++] = chan->sg + out + in;
		in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
				     in_pages, in_nr_pages, offs, inlen);
	}

	BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
				GFP_ATOMIC);
	if (err < 0) {
		if (err == -ENOSPC) {
			chan->ring_bufs_avail = 0;
			spin_unlock_irqrestore(&chan->lock, flags);
			err = wait_event_killable(*chan->vc_wq,
						  chan->ring_bufs_avail);
			if (err == -ERESTARTSYS)
				goto err_out;

			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
			goto req_retry_pinned;
		} else {
			spin_unlock_irqrestore(&chan->lock, flags);
			p9_debug(P9_DEBUG_TRANS,
				 "virtio rpc add_sgs returned failure\n");
			err = -EIO;
			goto err_out;
		}
	}
	virtqueue_kick(chan->vq);
	spin_unlock_irqrestore(&chan->lock, flags);
	kicked = 1;
	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
	/*
	 * Non-kernel buffers are pinned, unpin them
	 */
err_out:
	if (need_drop) {
		if (in_pages) {
			p9_release_pages(in_pages, in_nr_pages);
			atomic_sub(in_nr_pages, &vp_pinned);
		}
		if (out_pages) {
			p9_release_pages(out_pages, out_nr_pages);
			atomic_sub(out_nr_pages, &vp_pinned);
		}
		/* wakeup anybody waiting for slots to pin pages */
		wake_up(&vp_wq);
	}
	kvfree(in_pages);
	kvfree(out_pages);
	if (!kicked) {
		/* reply won't come */
		p9_req_put(req);
	}
	return err;
}

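/* Expose the channel's mount tag through sysfs so userspace (e.g. udev) can match devices. */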
static ssize_t p9_mount_tag_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct virtio_chan *chan;
	struct virtio_device *vdev;
	int tag_len;

	vdev = dev_to_virtio(dev);
	chan = vdev->priv;
	tag_len = strlen(chan->tag);

	memcpy(buf, chan->tag, tag_len + 1);

	return tag_len + 1;
}

static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);

/**
 * p9_virtio_probe - probe for existence of 9P virtio channels
 * @vdev: virtio device to probe
 *
 * This probes for existing virtio channels.
 *
 */

static int p9_virtio_probe(struct virtio_device *vdev)
{
	__u16 tag_len;
	char *tag;
	int err;
	struct virtio_chan *chan;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
	if (!chan) {
		pr_err("Failed to allocate virtio 9P channel\n");
		err = -ENOMEM;
		goto fail;
	}

	chan->vdev = vdev;

	/* We expect one virtqueue, for requests. */
	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
	if (IS_ERR(chan->vq)) {
		err = PTR_ERR(chan->vq);
		goto out_free_chan;
	}
	chan->vq->vdev->priv = chan;
	spin_lock_init(&chan->lock);

	sg_init_table(chan->sg, VIRTQUEUE_NUM);

	chan->inuse = false;
	if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
		virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
	} else {
		err = -EINVAL;
		goto out_free_vq;
	}
	tag = kzalloc(tag_len + 1, GFP_KERNEL);
	if (!tag) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
			   tag, tag_len);
	chan->tag = tag;
	err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
	if (err)
		goto out_free_tag;
	chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (!chan->vc_wq) {
		err = -ENOMEM;
		goto out_remove_file;
	}
	init_waitqueue_head(chan->vc_wq);
	chan->ring_bufs_avail = 1;
	/* Ceiling limit to avoid denial of service attacks */
	chan->p9_max_pages = nr_free_buffer_pages()/4;

	virtio_device_ready(vdev);

	mutex_lock(&virtio_9p_lock);
	list_add_tail(&chan->chan_list, &virtio_chan_list);
	mutex_unlock(&virtio_9p_lock);

	/* Let udev rules use the new mount_tag attribute. */
	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);

	return 0;

out_remove_file:
	sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
out_free_tag:
	kfree(tag);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_chan:
	kfree(chan);
fail:
	return err;
}


/**
 * p9_virtio_create - allocate a new virtio channel
 * @client: client instance invoking this transport
 * @devname: string identifying the channel to connect to (unused)
 * @args: args passed from sys_mount() for per-transport options (unused)
 *
 * This sets up a transport channel for 9p communication. Right now
 * we only match the first available channel, but eventually we could look up
 * alternate channels by matching devname versus a virtio_config entry.
 * We use a simple reference count mechanism to ensure that only a single
 * mount has a channel open at a time.
 *
 */

static int
p9_virtio_create(struct p9_client *client, const char *devname, char *args)
{
	struct virtio_chan *chan;
	int ret = -ENOENT;
	int found = 0;

	if (devname == NULL)
		return -EINVAL;

	mutex_lock(&virtio_9p_lock);
	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
		if (!strcmp(devname, chan->tag)) {
			if (!chan->inuse) {
				chan->inuse = true;
				found = 1;
				break;
			}
			ret = -EBUSY;
		}
	}
	mutex_unlock(&virtio_9p_lock);

	if (!found) {
		pr_err("no channels available for device %s\n", devname);
		return ret;
	}

	client->trans = (void *)chan;
	client->status = Connected;
	chan->client = client;

	return 0;
}

/**
 * p9_virtio_remove - clean up resources associated with a virtio device
 * @vdev: virtio device to remove
 *
 */

static void p9_virtio_remove(struct virtio_device *vdev)
{
	struct virtio_chan *chan = vdev->priv;
	unsigned long warning_time;

	mutex_lock(&virtio_9p_lock);

	/* Remove self from list so we don't get new users. */
	list_del(&chan->chan_list);
	warning_time = jiffies;

	/* Wait for existing users to close. */
	while (chan->inuse) {
		mutex_unlock(&virtio_9p_lock);
		msleep(250);
		if (time_after(jiffies, warning_time + 10 * HZ)) {
			dev_emerg(&vdev->dev,
				  "p9_virtio_remove: waiting for device in use.\n");
			warning_time = jiffies;
		}
		mutex_lock(&virtio_9p_lock);
	}

	mutex_unlock(&virtio_9p_lock);

	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
	kfree(chan->tag);
	kfree(chan->vc_wq);
	kfree(chan);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_9P_MOUNT_TAG,
};

/* The standard "struct lguest_driver": */
static struct virtio_driver p9_virtio_drv = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= p9_virtio_probe,
	.remove			= p9_virtio_remove,
};

static struct p9_trans_module p9_virtio_trans = {
	.name = "virtio",
	.create = p9_virtio_create,
	.close = p9_virtio_close,
	.request = p9_virtio_request,
	.zc_request = p9_virtio_zc_request,
	.cancel = p9_virtio_cancel,
	.cancelled = p9_virtio_cancelled,
	/*
	 * We leave one entry for input and one entry for response
	 * headers. We also skip one more entry to accommodate addresses
	 * that are not at a page boundary, which can result in an extra
	 * page in zero copy.
	 */
	.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
	.def = 1,
	.owner = THIS_MODULE,
};

/* The standard init function */
static int __init p9_virtio_init(void)
{
	int rc;

	INIT_LIST_HEAD(&virtio_chan_list);

	v9fs_register_trans(&p9_virtio_trans);
	rc = register_virtio_driver(&p9_virtio_drv);
	if (rc)
		v9fs_unregister_trans(&p9_virtio_trans);

	return rc;
}

static void __exit p9_virtio_cleanup(void)
{
	unregister_virtio_driver(&p9_virtio_drv);
	v9fs_unregister_trans(&p9_virtio_trans);
}

module_init(p9_virtio_init);
module_exit(p9_virtio_cleanup);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Virtio 9p Transport");
MODULE_LICENSE("GPL");