// SPDX-License-Identifier: GPL-2.0-or-later
/*  Xenbus code for blkif backend
    Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
    Copyright (C) 2005 XenSource Ltd
*/

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <stdarg.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include "common.h"

/* Maximum length of a 'ring-ref%u' key on the XenBus. */
#define RINGREF_NAME_LEN (20)

struct backend_info {
	struct xenbus_device	*dev;
	struct xen_blkif	*blkif;
	struct xenbus_watch	backend_watch;
	unsigned		major;
	unsigned		minor;
	char			*mode;
};

static struct kmem_cache *xen_blkif_cachep;
static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char *,
			    const char *);
static void xen_blkif_free(struct xen_blkif *blkif);
static void xen_vbd_free(struct xen_vbd *vbd);

struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}

/*
 * The last request could free the device from softirq context and
 * xen_blkif_free() can sleep.
 */
static void xen_blkif_deferred_free(struct work_struct *work)
{
	struct xen_blkif *blkif;

	blkif = container_of(work, struct xen_blkif, free_work);
	xen_blkif_free(blkif);
}

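/*
 * Derive the name used for the per-ring "xenblkd" kernel threads:
 * "<domid>.<devname>", truncated to TASK_COMM_LEN, where <devname> comes
 * from the backend's xenstore "dev" node (e.g. "1.xvda" for a disk backed
 * by /dev/xvda -- example values for illustration only).
 */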
static int blkback_name(struct xen_blkif *blkif, char *buf)
{
	char *devpath, *devname;
	struct xenbus_device *dev = blkif->be->dev;

	devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
	if (IS_ERR(devpath))
		return PTR_ERR(devpath);

	devname = strstr(devpath, "/dev/");
	if (devname != NULL)
		devname += strlen("/dev/");
	else
		devname = devpath;

	snprintf(buf, TASK_COMM_LEN, "%d.%s", blkif->domid, devname);
	kfree(devpath);

	return 0;
}

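/*
 * (Re)check whether we can transition this backend to Connected: the rings
 * must be mapped, the first event channel bound and the physical device
 * opened.  On success, spawn one "xenblkd" thread per ring to service I/O.
 */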
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];
	struct xen_blkif_ring *ring;
	int i;

	/* Not ready to connect? */
	if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	for (i = 0; i < blkif->nr_rings; i++) {
		ring = &blkif->rings[i];
		ring->xenblkd = kthread_run(xen_blkif_schedule, ring, "%s-%d",
					    name, i);
		if (IS_ERR(ring->xenblkd)) {
			err = PTR_ERR(ring->xenblkd);
			ring->xenblkd = NULL;
			xenbus_dev_fatal(blkif->be->dev, err,
					 "start %s-%d xenblkd", name, i);
			goto out;
		}
	}
	return;

out:
	while (--i >= 0) {
		ring = &blkif->rings[i];
		kthread_stop(ring->xenblkd);
	}
	return;
}

static int xen_blkif_alloc_rings(struct xen_blkif *blkif)
{
	unsigned int r;

	blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
			       GFP_KERNEL);
	if (!blkif->rings)
		return -ENOMEM;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];

		spin_lock_init(&ring->blk_ring_lock);
		init_waitqueue_head(&ring->wq);
		INIT_LIST_HEAD(&ring->pending_free);
		INIT_LIST_HEAD(&ring->persistent_purge_list);
		INIT_WORK(&ring->persistent_purge_work,
			  xen_blkbk_unmap_purged_grants);
		gnttab_page_cache_init(&ring->free_pages);

		spin_lock_init(&ring->pending_free_lock);
		init_waitqueue_head(&ring->pending_free_wq);
		init_waitqueue_head(&ring->shutdown_wq);
		ring->blkif = blkif;
		ring->st_print = jiffies;
		ring->active = true;
	}

	return 0;
}

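/*
 * Default for the "feature-persistent" knob advertised in connect().  As
 * the parameter is writable (0644), it can also be flipped at runtime,
 * e.g. via /sys/module/xen_blkback/parameters/feature_persistent;
 * connect_ring() samples the current value on each (re)connect.
 */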
/* Enable the persistent grants feature. */
static bool feature_persistent = true;
module_param(feature_persistent, bool, 0644);
MODULE_PARM_DESC(feature_persistent, "Enables the persistent grants feature");

static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
	struct xen_blkif *blkif;

	BUILD_BUG_ON(MAX_INDIRECT_PAGES > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
	if (!blkif)
		return ERR_PTR(-ENOMEM);

	blkif->domid = domid;
	atomic_set(&blkif->refcnt, 1);
	init_completion(&blkif->drain_complete);

	/*
	 * Because freeing back to the cache may be deferred, it is not
	 * safe to unload the module (and hence destroy the cache) until
	 * this has completed. To prevent premature unloading, take an
	 * extra module reference here and release only when the object
	 * has been freed back to the cache.
	 */
	__module_get(THIS_MODULE);
	INIT_WORK(&blkif->free_work, xen_blkif_deferred_free);

	return blkif;
}

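/*
 * Map the frontend's shared ring pages and bind its event channel.  The
 * producer indices are sampled once with READ_ONCE() and sanity-checked
 * against the ring size below, so a buggy or malicious frontend cannot
 * trick us into processing slots that were never produced.
 */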
static int xen_blkif_map(struct xen_blkif_ring *ring, grant_ref_t *gref,
			 unsigned int nr_grefs, unsigned int evtchn)
{
	int err;
	struct xen_blkif *blkif = ring->blkif;
	const struct blkif_common_sring *sring_common;
	RING_IDX rsp_prod, req_prod;
	unsigned int size;

	/* Already connected through? */
	if (ring->irq)
		return 0;

	err = xenbus_map_ring_valloc(blkif->be->dev, gref, nr_grefs,
				     &ring->blk_ring);
	if (err < 0)
		return err;

	sring_common = (struct blkif_common_sring *)ring->blk_ring;
	rsp_prod = READ_ONCE(sring_common->rsp_prod);
	req_prod = READ_ONCE(sring_common->req_prod);

	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring_native =
			(struct blkif_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.native, sring_native,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_native, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32 =
			(struct blkif_x86_32_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_32, sring_x86_32,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_32, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64 =
			(struct blkif_x86_64_sring *)ring->blk_ring;

		BACK_RING_ATTACH(&ring->blk_rings.x86_64, sring_x86_64,
				 rsp_prod, XEN_PAGE_SIZE * nr_grefs);
		size = __RING_SIZE(sring_x86_64, XEN_PAGE_SIZE * nr_grefs);
		break;
	}
	default:
		BUG();
	}

	err = -EIO;
	if (req_prod - rsp_prod > size)
		goto fail;

	err = bind_interdomain_evtchn_to_irqhandler_lateeoi(blkif->domid,
			evtchn, xen_blkif_be_int, 0, "blkif-backend", ring);
	if (err < 0)
		goto fail;
	ring->irq = err;

	return 0;

fail:
	xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
	ring->blk_rings.common.sring = NULL;
	return err;
}

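/*
 * Tear down what xen_blkif_map() and read_per_ring_refs() set up: stop the
 * xenblkd threads, unbind the IRQs, unmap the rings and free the pending
 * requests.  Returns -EBUSY (keeping the rings allocated) while I/O is
 * still in flight; callers retry later, so this must remain safe to call
 * repeatedly.
 */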
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
	struct pending_req *req, *n;
	unsigned int j, r;
	bool busy = false;

	for (r = 0; r < blkif->nr_rings; r++) {
		struct xen_blkif_ring *ring = &blkif->rings[r];
		unsigned int i = 0;

		if (!ring->active)
			continue;

		if (ring->xenblkd) {
			kthread_stop(ring->xenblkd);
			ring->xenblkd = NULL;
			wake_up(&ring->shutdown_wq);
		}

		/*
		 * The above kthread_stop() guarantees that at this point we
		 * don't have any discard_io or other_io requests. So, checking
		 * for inflight IO is enough.
		 */
		if (atomic_read(&ring->inflight) > 0) {
			busy = true;
			continue;
		}

		if (ring->irq) {
			unbind_from_irqhandler(ring->irq, ring);
			ring->irq = 0;
		}

		if (ring->blk_rings.common.sring) {
			xenbus_unmap_ring_vfree(blkif->be->dev, ring->blk_ring);
			ring->blk_rings.common.sring = NULL;
		}

		/* Remove all persistent grants and the cache of ballooned pages. */
		xen_blkbk_free_caches(ring);

		/* Check that there is no request in use */
		list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
			list_del(&req->free_list);

			for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
				kfree(req->segments[j]);

			for (j = 0; j < MAX_INDIRECT_PAGES; j++)
				kfree(req->indirect_pages[j]);

			kfree(req);
			i++;
		}

		BUG_ON(atomic_read(&ring->persistent_gnt_in_use) != 0);
		BUG_ON(!list_empty(&ring->persistent_purge_list));
		BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
		BUG_ON(ring->free_pages.num_pages != 0);
		BUG_ON(ring->persistent_gnt_c != 0);
		WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
		ring->active = false;
	}
	if (busy)
		return -EBUSY;

	blkif->nr_ring_pages = 0;
	/*
	 * blkif->rings was allocated in connect_ring, so we should free it
	 * here.
	 */
	kfree(blkif->rings);
	blkif->rings = NULL;
	blkif->nr_rings = 0;

	return 0;
}

static void xen_blkif_free(struct xen_blkif *blkif)
{
	WARN_ON(xen_blkif_disconnect(blkif));
	xen_vbd_free(&blkif->vbd);
	kfree(blkif->be->mode);
	kfree(blkif->be);

	/* Make sure everything is drained before shutting down */
	kmem_cache_free(xen_blkif_cachep, blkif);
	module_put(THIS_MODULE);
}

int __init xen_blkif_interface_init(void)
{
	xen_blkif_cachep = kmem_cache_create("blkif_cache",
					     sizeof(struct xen_blkif),
					     0, 0, NULL);
	if (!xen_blkif_cachep)
		return -ENOMEM;

	return 0;
}

void xen_blkif_interface_fini(void)
{
	kmem_cache_destroy(xen_blkif_cachep);
	xen_blkif_cachep = NULL;
}

/*
 * sysfs interface for VBD I/O requests
 */

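/*
 * These attributes are exposed under the backend device's sysfs directory,
 * e.g. /sys/bus/xen-backend/devices/vbd-<domid>-<handle>/statistics/rd_req
 * (illustrative path); each counter is summed over all rings.
 */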
#define VBD_SHOW_ALLRING(name, format)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
		struct xen_blkif *blkif = be->blkif;			\
		unsigned int i;						\
		unsigned long long result = 0;				\
									\
		if (!blkif->rings)					\
			goto out;					\
									\
		for (i = 0; i < blkif->nr_rings; i++) {			\
			struct xen_blkif_ring *ring = &blkif->rings[i];	\
									\
			result += ring->st_##name;			\
		}							\
									\
out:									\
		return sprintf(buf, format, result);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW_ALLRING(oo_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_req,  "%llu\n");
VBD_SHOW_ALLRING(wr_req,  "%llu\n");
VBD_SHOW_ALLRING(f_req,  "%llu\n");
VBD_SHOW_ALLRING(ds_req,  "%llu\n");
VBD_SHOW_ALLRING(rd_sect, "%llu\n");
VBD_SHOW_ALLRING(wr_sect, "%llu\n");

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_ds_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static const struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, 0444, show_##name, NULL)

VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);

static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
	int error;

	error = device_create_file(&dev->dev, &dev_attr_physical_device);
	if (error)
		goto fail1;

	error = device_create_file(&dev->dev, &dev_attr_mode);
	if (error)
		goto fail2;

	error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
	if (error)
		goto fail3;

	return 0;

fail3:	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
fail2:	device_remove_file(&dev->dev, &dev_attr_mode);
fail1:	device_remove_file(&dev->dev, &dev_attr_physical_device);
	return error;
}

static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
	sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
	device_remove_file(&dev->dev, &dev_attr_mode);
	device_remove_file(&dev->dev, &dev_attr_physical_device);
}

static void xen_vbd_free(struct xen_vbd *vbd)
{
	if (vbd->bdev)
		blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
	vbd->bdev = NULL;
}

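/*
 * Open the physical device named by <major:minor> (as provided by the
 * hotplug scripts) and record its properties in the vbd: size, CD-ROM and
 * removable flags, write-cache (flush) and secure-erase support.
 */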
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle   = handle;
	vbd->readonly = readonly;
	vbd->type     = 0;

	vbd->pdevice  = MKDEV(major, minor);

	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		pr_warn("xen_vbd_create: device %08x could not be opened\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		pr_warn("xen_vbd_create: device %08x doesn't exist\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	q = bdev_get_queue(bdev);
	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		vbd->flush_support = true;

	if (q && blk_queue_secure_erase(q))
		vbd->discard_secure = true;

	pr_debug("Successful creation of handle=%04x (dom=%u)\n",
		 handle, blkif->domid);
	return 0;
}

static int xen_blkbk_remove(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (be->major || be->minor)
		xenvbd_sysfs_delif(dev);

	if (be->backend_watch.node) {
		unregister_xenbus_watch(&be->backend_watch);
		kfree(be->backend_watch.node);
		be->backend_watch.node = NULL;
	}

	dev_set_drvdata(&dev->dev, NULL);

	if (be->blkif) {
		xen_blkif_disconnect(be->blkif);

		/* Put the reference we set in xen_blkif_alloc(). */
		xen_blkif_put(be->blkif);
	}

	return 0;
}

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);

	return err;
}

static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	int err;
	int state = 0;
	struct block_device *bdev = be->blkif->vbd.bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1))
		return;

	if (blk_queue_discard(q)) {
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-granularity", "%u",
				    q->limits.discard_granularity);
		if (err) {
			dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
			return;
		}
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-alignment", "%u",
				    q->limits.discard_alignment);
		if (err) {
			dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
			return;
		}
		state = 1;
		/* Optional. */
		err = xenbus_printf(xbt, dev->nodename,
				    "discard-secure", "%d",
				    blkif->vbd.discard_secure);
		if (err) {
			dev_warn(&dev->dev, "writing discard-secure (%d)", err);
			return;
		}
	}
	err = xenbus_printf(xbt, dev->nodename, "feature-discard",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state)
{
	struct xenbus_device *dev = be->dev;
	int err;

	err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
			    "%d", state);
	if (err)
		dev_warn(&dev->dev, "writing feature-barrier (%d)", err);

	return err;
}

/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers. Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);

	/* match the pr_debug in xen_blkbk_remove */
	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "feature-max-indirect-segments", "%u",
			    MAX_INDIRECT_SEGMENTS);
	if (err)
		dev_warn(&dev->dev,
			 "writing %s/feature-max-indirect-segments (%d)",
			 dev->nodename, err);

	/* Multi-queue: advertise how many queues are supported by us. */
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "multi-queue-max-queues", "%u", xenblk_max_queues);
	if (err)
		pr_warn("Error writing multi-queue-max-queues\n");

	/* setup back pointer */
	be->blkif->be = be;

	err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
				   backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_printf(XBT_NIL, dev->nodename, "max-ring-page-order", "%u",
			    xen_blkif_max_ring_order);
	if (err)
		pr_warn("%s write out 'max-ring-page-order' failed\n", __func__);

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	xen_blkbk_remove(dev);
	return err;
}

/*
 * Callback received when the hotplug scripts have placed the physical-device
 * node. Read it and the mode node, and create a vbd. If the frontend is
 * ready, connect.
 */
static void backend_changed(struct xenbus_watch *watch,
			    const char *path, const char *token)
{
	int err;
	unsigned major;
	unsigned minor;
	struct backend_info *be
		= container_of(watch, struct backend_info, backend_watch);
	struct xenbus_device *dev = be->dev;
	int cdrom = 0;
	unsigned long handle;
	char *device_type;

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
			   &major, &minor);
	if (XENBUS_EXIST_ERR(err)) {
		/*
		 * Since this watch will fire once immediately after it is
		 * registered, we expect this. Ignore it, and wait for the
		 * hotplug scripts.
		 */
		return;
	}
	if (err != 2) {
		xenbus_dev_fatal(dev, err, "reading physical-device");
		return;
	}

	if (be->major | be->minor) {
		if (be->major != major || be->minor != minor)
			pr_warn("changing physical device (from %x:%x to %x:%x) not supported.\n",
				be->major, be->minor, major, minor);
		return;
	}

	be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
	if (IS_ERR(be->mode)) {
		err = PTR_ERR(be->mode);
		be->mode = NULL;
		xenbus_dev_fatal(dev, err, "reading mode");
		return;
	}

	device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
	if (!IS_ERR(device_type)) {
		cdrom = strcmp(device_type, "cdrom") == 0;
		kfree(device_type);
	}

	/* Front end dir is a number, which is used as the handle. */
	err = kstrtoul(strrchr(dev->otherend, '/') + 1, 0, &handle);
	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		return;
	}

	be->major = major;
	be->minor = minor;

	err = xen_vbd_create(be->blkif, handle, major, minor,
			     !strchr(be->mode, 'w'), cdrom);

	if (err)
		xenbus_dev_fatal(dev, err, "creating vbd structure");
	else {
		err = xenvbd_sysfs_addif(dev);
		if (err) {
			xen_vbd_free(&be->blkif->vbd);
			xenbus_dev_fatal(dev, err, "creating sysfs entries");
		}
	}

	if (err) {
		kfree(be->mode);
		be->mode = NULL;
		be->major = 0;
		be->minor = 0;
	} else {
		/* We're potentially connected now */
		xen_update_blkif_status(be->blkif);
	}
}

/*
 * Callback received when the frontend's state changes.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	pr_debug("%s %p %s\n", __func__, dev, xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		if (dev->state == XenbusStateClosed) {
			pr_info("%s: prepare for reconnect\n", dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close succession and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * xen_blkif_disconnect() is idempotent.
		 */
		err = xen_blkif_disconnect(be->blkif);
		if (err) {
			xenbus_dev_fatal(dev, err, "pending I/O");
			break;
		}

		err = connect_ring(be);
		if (err) {
			/*
			 * Clean up so that memory resources can be used by
			 * other devices. connect_ring() already reported the
			 * error.
			 */
			xen_blkif_disconnect(be->blkif);
			break;
		}
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;
		/* if not online */
	case XenbusStateUnknown:
		/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

/* Once memory pressure is detected, squeeze free page pools for a while. */
static unsigned int buffer_squeeze_duration_ms = 10;
module_param_named(buffer_squeeze_duration_ms,
		buffer_squeeze_duration_ms, int, 0644);
MODULE_PARM_DESC(buffer_squeeze_duration_ms,
		"Duration in ms to squeeze pages buffer when a memory pressure is detected");

/*
 * Callback received when memory pressure is detected.
 */
static void reclaim_memory(struct xenbus_device *dev)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);

	if (!be)
		return;
	be->blkif->buffer_squeeze_end = jiffies +
		msecs_to_jiffies(buffer_squeeze_duration_ms);
}

/* ** Connection ** */

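/*
 * For reference, connect() and its helpers below advertise at least the
 * following nodes in the backend's xenstore directory before switching to
 * Connected: feature-flush-cache, feature-discard, feature-barrier,
 * feature-persistent, sectors, info, sector-size and physical-sector-size.
 */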
/*
 * Write the physical details regarding the block device to the store, and
 * switch to Connected state.
 */
static void connect(struct backend_info *be)
{
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = be->dev;

	pr_debug("%s %s\n", __func__, dev->otherend);

	/* Supply the information about the device the frontend needs */
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		return;
	}

	/* If we can't advertise it, that is OK. */
	xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);

	xen_blkbk_discard(xbt, be);

	xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);

	err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
			    be->blkif->vbd.feature_gnt_persistent_parm);
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
				 dev->nodename);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(&be->blkif->vbd));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sectors",
				 dev->nodename);
		goto abort;
	}

	/* FIXME: use a typename instead */
	err = xenbus_printf(xbt, dev->nodename, "info", "%u",
			    be->blkif->vbd.type |
			    (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/info",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
			    (unsigned long)
			    bdev_logical_block_size(be->blkif->vbd.bdev));
	if (err) {
		xenbus_dev_fatal(dev, err, "writing %s/sector-size",
				 dev->nodename);
		goto abort;
	}
	err = xenbus_printf(xbt, dev->nodename, "physical-sector-size", "%u",
			    bdev_physical_block_size(be->blkif->vbd.bdev));
	if (err)
		xenbus_dev_error(dev, err, "writing %s/physical-sector-size",
				 dev->nodename);

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		xenbus_dev_fatal(dev, err, "ending transaction");

	err = xenbus_switch_state(dev, XenbusStateConnected);
	if (err)
		xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
				 dev->nodename);

	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Each ring may span multiple pages, depending on "ring-page-order".
 */
static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
{
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	struct pending_req *req, *n;
	int err, i, j;
	struct xen_blkif *blkif = ring->blkif;
	struct xenbus_device *dev = blkif->be->dev;
	unsigned int nr_grefs, evtchn;

	err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel", dir);
		return err;
	}

	nr_grefs = blkif->nr_ring_pages;

	if (unlikely(!nr_grefs)) {
		WARN_ON(true);
		return -EINVAL;
	}

	for (i = 0; i < nr_grefs; i++) {
		char ring_ref_name[RINGREF_NAME_LEN];

		if (blkif->multi_ref)
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
		else {
			WARN_ON(i != 0);
			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
		}

		err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
				   "%u", &ring_ref[i]);

		if (err != 1) {
			err = -EINVAL;
			xenbus_dev_fatal(dev, err, "reading %s/%s",
					 dir, ring_ref_name);
			return err;
		}
	}

	err = -ENOMEM;
	for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			goto fail;
		list_add_tail(&req->free_list, &ring->pending_free);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			req->segments[j] = kzalloc(sizeof(*req->segments[0]),
						   GFP_KERNEL);
			if (!req->segments[j])
				goto fail;
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			req->indirect_pages[j] = kzalloc(sizeof(*req->indirect_pages[0]),
							 GFP_KERNEL);
			if (!req->indirect_pages[j])
				goto fail;
		}
	}

	/* Map the shared frame, irq etc. */
	err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
		goto fail;
	}

	return 0;

fail:
	list_for_each_entry_safe(req, n, &ring->pending_free, free_list) {
		list_del(&req->free_list);
		for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++) {
			if (!req->segments[j])
				break;
			kfree(req->segments[j]);
		}
		for (j = 0; j < MAX_INDIRECT_PAGES; j++) {
			if (!req->indirect_pages[j])
				break;
			kfree(req->indirect_pages[j]);
		}
		kfree(req);
	}
	return err;
}

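/*
 * Negotiate ring parameters with the frontend.  With multiple queues the
 * per-ring nodes live in "queue-N" subdirectories of the frontend area, so
 * a two-queue, single-page setup would look roughly like (layout shown for
 * illustration):
 *
 *   <otherend>/queue-0/ring-ref, <otherend>/queue-0/event-channel
 *   <otherend>/queue-1/ring-ref, <otherend>/queue-1/event-channel
 *
 * whereas a single-queue frontend keeps "ring-ref*" and "event-channel"
 * directly under <otherend>.
 */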
static int connect_ring(struct backend_info *be)
{
	struct xenbus_device *dev = be->dev;
	struct xen_blkif *blkif = be->blkif;
	char protocol[64] = "";
	int err, i;
	char *xspath;
	size_t xspathsize;
	const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
	unsigned int requested_num_queues = 0;
	unsigned int ring_page_order;

	pr_debug("%s %s\n", __func__, dev->otherend);

	blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
			   "%63s", protocol);
	if (err <= 0)
		strcpy(protocol, "unspecified, assuming default");
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
		blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
	else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
		blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
	else {
		xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
		return -ENOSYS;
	}

	blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
	blkif->vbd.feature_gnt_persistent =
		blkif->vbd.feature_gnt_persistent_parm &&
		xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);

	blkif->vbd.overflow_max_grants = 0;

	/*
	 * Read the number of hardware queues from frontend.
	 */
	requested_num_queues = xenbus_read_unsigned(dev->otherend,
						    "multi-queue-num-queues",
						    1);
	if (requested_num_queues > xenblk_max_queues
	    || requested_num_queues == 0) {
		/* Buggy or malicious guest. */
		xenbus_dev_fatal(dev, err,
				 "guest requested %u queues, exceeding the maximum of %u.",
				 requested_num_queues, xenblk_max_queues);
		return -ENOSYS;
	}
	blkif->nr_rings = requested_num_queues;
	if (xen_blkif_alloc_rings(blkif))
		return -ENOMEM;

	pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
		blkif->nr_rings, blkif->blk_protocol, protocol,
		blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
			   &ring_page_order);
	if (err != 1) {
		blkif->nr_ring_pages = 1;
		blkif->multi_ref = false;
	} else if (ring_page_order <= xen_blkif_max_ring_order) {
		blkif->nr_ring_pages = 1 << ring_page_order;
		blkif->multi_ref = true;
	} else {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err,
				 "requested ring page order %d exceed max:%d",
				 ring_page_order,
				 xen_blkif_max_ring_order);
		return err;
	}

	if (blkif->nr_rings == 1)
		return read_per_ring_refs(&blkif->rings[0], dev->otherend);
	else {
		xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
		xspath = kmalloc(xspathsize, GFP_KERNEL);
		if (!xspath) {
			xenbus_dev_fatal(dev, -ENOMEM, "reading ring references");
			return -ENOMEM;
		}

		for (i = 0; i < blkif->nr_rings; i++) {
			memset(xspath, 0, xspathsize);
			snprintf(xspath, xspathsize, "%s/queue-%u",
				 dev->otherend, i);
			err = read_per_ring_refs(&blkif->rings[i], xspath);
			if (err) {
				kfree(xspath);
				return err;
			}
		}
		kfree(xspath);
	}
	return 0;
}

static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver xen_blkbk_driver = {
	.ids  = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed,
	.allow_rebind = true,
	.reclaim_memory = reclaim_memory,
};

int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk_driver);
}

void xen_blkif_xenbus_fini(void)
{
	xenbus_unregister_driver(&xen_blkbk_driver);
}