/******************************************************************************
 * Talks to Xen Store to figure out what devices we have.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 * Copyright (C) 2005 Mike Wray, Hewlett-Packard
 * Copyright (C) 2005, 2006 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#define DPRINTK(fmt, args...)				\
	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
		 __func__, __LINE__, ##args)

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/xen-ops.h>
#include <xen/page.h>

#include <xen/hvm.h>

#include "xenbus.h"


int xen_store_evtchn;
EXPORT_SYMBOL_GPL(xen_store_evtchn);

struct xenstore_domain_interface *xen_store_interface;
EXPORT_SYMBOL_GPL(xen_store_interface);

enum xenstore_init xen_store_domain_type;
EXPORT_SYMBOL_GPL(xen_store_domain_type);

static unsigned long xen_store_gfn;

static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
{
	for (; *arr->devicetype != '\0'; arr++) {
		if (!strcmp(arr->devicetype, dev->devicetype))
			return arr;
	}
	return NULL;
}

int xenbus_match(struct device *_dev, struct device_driver *_drv)
{
	struct xenbus_driver *drv = to_xenbus_driver(_drv);

	if (!drv->ids)
		return 0;

	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
}
EXPORT_SYMBOL_GPL(xenbus_match);
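
/*
 * Illustrative sketch, not part of the original file: xenbus_match() above
 * walks a driver's ->ids table and compares each entry's devicetype string
 * against the device's devicetype. A hypothetical frontend driver would
 * therefore declare a table shaped like this (the name example_front_ids is
 * made up; "vif" is the conventional network-frontend type):
 *
 *	static const struct xenbus_device_id example_front_ids[] = {
 *		{ "vif" },
 *		{ "" },		<- an empty devicetype terminates the table
 *	};
 */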


static void free_otherend_details(struct xenbus_device *dev)
{
	kfree(dev->otherend);
	dev->otherend = NULL;
}


static void free_otherend_watch(struct xenbus_device *dev)
{
	if (dev->otherend_watch.node) {
		unregister_xenbus_watch(&dev->otherend_watch);
		kfree(dev->otherend_watch.node);
		dev->otherend_watch.node = NULL;
	}
}


static int talk_to_otherend(struct xenbus_device *dev)
{
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);

	free_otherend_watch(dev);
	free_otherend_details(dev);

	return drv->read_otherend_details(dev);
}



static int watch_otherend(struct xenbus_device *dev)
{
	struct xen_bus_type *bus =
		container_of(dev->dev.bus, struct xen_bus_type, bus);

	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
				    bus->otherend_will_handle,
				    bus->otherend_changed,
				    "%s/%s", dev->otherend, "state");
}


int xenbus_read_otherend_details(struct xenbus_device *xendev,
				 char *id_node, char *path_node)
{
	int err = xenbus_gather(XBT_NIL, xendev->nodename,
				id_node, "%i", &xendev->otherend_id,
				path_node, NULL, &xendev->otherend,
				NULL);
	if (err) {
		xenbus_dev_fatal(xendev, err,
				 "reading other end details from %s",
				 xendev->nodename);
		return err;
	}
	if (strlen(xendev->otherend) == 0 ||
	    !xenbus_exists(XBT_NIL, xendev->otherend, "")) {
		xenbus_dev_fatal(xendev, -ENOENT,
				 "unable to read other end from %s.  "
				 "missing or inaccessible.",
				 xendev->nodename);
		free_otherend_details(xendev);
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
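
/*
 * Illustrative sketch, not part of the original file: a xen_bus_type wires
 * its ->read_otherend_details hook to this helper, passing the xenstore
 * node names that hold the peer's domid and path. The frontend bus
 * typically does:
 *
 *	static int read_backend_details(struct xenbus_device *xendev)
 *	{
 *		return xenbus_read_otherend_details(xendev, "backend-id",
 *						    "backend");
 *	}
 *
 * while a backend bus would pass "frontend-id" and "frontend" instead.
 */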

void xenbus_otherend_changed(struct xenbus_watch *watch,
			     const char *path, const char *token,
			     int ignore_on_shutdown)
{
	struct xenbus_device *dev =
		container_of(watch, struct xenbus_device, otherend_watch);
	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
	enum xenbus_state state;

	/* Protect us against watches firing on old details when the otherend
	   details change, say immediately after a resume. */
	if (!dev->otherend ||
	    strncmp(dev->otherend, path, strlen(dev->otherend))) {
		dev_dbg(&dev->dev, "Ignoring watch at %s\n", path);
		return;
	}

	state = xenbus_read_driver_state(dev->otherend);

	dev_dbg(&dev->dev, "state is %d, (%s), %s, %s\n",
		state, xenbus_strstate(state), dev->otherend_watch.node, path);

	/*
	 * Ignore xenbus transitions during shutdown. This prevents us doing
	 * work that can fail e.g., when the rootfs is gone.
	 */
	if (system_state > SYSTEM_RUNNING) {
		if (ignore_on_shutdown && (state == XenbusStateClosing))
			xenbus_frontend_closed(dev);
		return;
	}

	if (drv->otherend_changed)
		drv->otherend_changed(dev, state);
}
EXPORT_SYMBOL_GPL(xenbus_otherend_changed);

int xenbus_dev_probe(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
	const struct xenbus_device_id *id;
	int err;

	DPRINTK("%s", dev->nodename);

	if (!drv->probe) {
		err = -ENODEV;
		goto fail;
	}

	id = match_device(drv->ids, dev);
	if (!id) {
		err = -ENODEV;
		goto fail;
	}

	err = talk_to_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "talk_to_otherend on %s failed.\n",
			 dev->nodename);
		return err;
	}

	if (!try_module_get(drv->driver.owner)) {
		dev_warn(&dev->dev, "failed to acquire module reference on '%s'\n",
			 drv->driver.name);
		err = -ESRCH;
		goto fail;
	}

	down(&dev->reclaim_sem);
	err = drv->probe(dev, id);
	up(&dev->reclaim_sem);
	if (err)
		goto fail_put;

	err = watch_otherend(dev);
	if (err) {
		dev_warn(&dev->dev, "watch_otherend on %s failed.\n",
		       dev->nodename);
		return err;
	}

	return 0;
fail_put:
	module_put(drv->driver.owner);
fail:
	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_dev_probe);

int xenbus_dev_remove(struct device *_dev)
{
	struct xenbus_device *dev = to_xenbus_device(_dev);
	struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);

	DPRINTK("%s", dev->nodename);

	free_otherend_watch(dev);

	if (drv->remove) {
		down(&dev->reclaim_sem);
		drv->remove(dev);
		up(&dev->reclaim_sem);
	}

	module_put(drv->driver.owner);

	free_otherend_details(dev);

	/*
	 * If the toolstack has forced the device state to closing then set
	 * the state to closed now to allow it to be cleaned up.
	 * Similarly, if the driver does not support re-bind, set the state
	 * to closed.
	 */
	if (!drv->allow_rebind ||
	    xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
		xenbus_switch_state(dev, XenbusStateClosed);

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_remove);

int xenbus_register_driver_common(struct xenbus_driver *drv,
				  struct xen_bus_type *bus,
				  struct module *owner, const char *mod_name)
{
	drv->driver.name = drv->name ? drv->name : drv->ids[0].devicetype;
	drv->driver.bus = &bus->bus;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_register_driver_common);

void xenbus_unregister_driver(struct xenbus_driver *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
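
/*
 * Illustrative sketch, not part of the original file: drivers normally do
 * not call xenbus_register_driver_common() directly; they use the
 * xenbus_register_frontend()/xenbus_register_backend() wrappers from
 * <xen/xenbus.h>, which supply THIS_MODULE and KBUILD_MODNAME. A minimal,
 * hypothetical frontend driver (a real probe would allocate its state and
 * drive the device towards XenbusStateConnected) looks roughly like:
 *
 *	static int example_probe(struct xenbus_device *dev,
 *				 const struct xenbus_device_id *id)
 *	{
 *		return 0;
 *	}
 *
 *	static struct xenbus_driver example_driver = {
 *		.ids = example_front_ids,
 *		.probe = example_probe,
 *	};
 *
 * It is registered at module init with
 * xenbus_register_frontend(&example_driver) and torn down with
 * xenbus_unregister_driver(&example_driver).
 */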

struct xb_find_info {
	struct xenbus_device *dev;
	const char *nodename;
};

static int cmp_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;

	if (!strcmp(xendev->nodename, info->nodename)) {
		info->dev = xendev;
		get_device(dev);
		return 1;
	}
	return 0;
}

static struct xenbus_device *xenbus_device_find(const char *nodename,
						struct bus_type *bus)
{
	struct xb_find_info info = { .dev = NULL, .nodename = nodename };

	bus_for_each_dev(bus, NULL, &info, cmp_dev);
	return info.dev;
}

static int cleanup_dev(struct device *dev, void *data)
{
	struct xenbus_device *xendev = to_xenbus_device(dev);
	struct xb_find_info *info = data;
	int len = strlen(info->nodename);

	DPRINTK("%s", info->nodename);

	/* Match the info->nodename path, or any subdirectory of that path. */
	if (strncmp(xendev->nodename, info->nodename, len))
		return 0;

	/* If the node name is longer, ensure it really is a subdirectory. */
	if ((strlen(xendev->nodename) > len) && (xendev->nodename[len] != '/'))
		return 0;

	info->dev = xendev;
	get_device(dev);
	return 1;
}

static void xenbus_cleanup_devices(const char *path, struct bus_type *bus)
{
	struct xb_find_info info = { .nodename = path };

	do {
		info.dev = NULL;
		bus_for_each_dev(bus, NULL, &info, cleanup_dev);
		if (info.dev) {
			device_unregister(&info.dev->dev);
			put_device(&info.dev->dev);
		}
	} while (info.dev);
}

static void xenbus_dev_release(struct device *dev)
{
	if (dev)
		kfree(to_xenbus_device(dev));
}

static ssize_t nodename_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
static DEVICE_ATTR_RO(nodename);

static ssize_t devtype_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(devtype);

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s:%s\n", dev->bus->name,
		       to_xenbus_device(dev)->devicetype);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t state_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n",
			xenbus_strstate(to_xenbus_device(dev)->state));
}
static DEVICE_ATTR_RO(state);

static struct attribute *xenbus_dev_attrs[] = {
	&dev_attr_nodename.attr,
	&dev_attr_devtype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_state.attr,
	NULL,
};

static const struct attribute_group xenbus_dev_group = {
	.attrs = xenbus_dev_attrs,
};

const struct attribute_group *xenbus_dev_groups[] = {
	&xenbus_dev_group,
	NULL,
};
EXPORT_SYMBOL_GPL(xenbus_dev_groups);

int xenbus_probe_node(struct xen_bus_type *bus,
		      const char *type,
		      const char *nodename)
{
	char devname[XEN_BUS_ID_SIZE];
	int err;
	struct xenbus_device *xendev;
	size_t stringlen;
	char *tmpstring;

	enum xenbus_state state = xenbus_read_driver_state(nodename);

	if (state != XenbusStateInitialising) {
		/* Device is not new, so ignore it.  This can happen if a
		   device is going away after switching to Closed.  */
		return 0;
	}

	stringlen = strlen(nodename) + 1 + strlen(type) + 1;
	xendev = kzalloc(sizeof(*xendev) + stringlen, GFP_KERNEL);
	if (!xendev)
		return -ENOMEM;

	xendev->state = XenbusStateInitialising;

	/* Copy the strings into the extra space. */

	tmpstring = (char *)(xendev + 1);
	strcpy(tmpstring, nodename);
	xendev->nodename = tmpstring;

	tmpstring += strlen(tmpstring) + 1;
	strcpy(tmpstring, type);
	xendev->devicetype = tmpstring;
	init_completion(&xendev->down);

	xendev->dev.bus = &bus->bus;
	xendev->dev.release = xenbus_dev_release;

	err = bus->get_bus_id(devname, xendev->nodename);
	if (err)
		goto fail;

	dev_set_name(&xendev->dev, "%s", devname);
	sema_init(&xendev->reclaim_sem, 1);

	/* Register with generic device framework. */
	err = device_register(&xendev->dev);
	if (err) {
		put_device(&xendev->dev);
		xendev = NULL;
		goto fail;
	}

	return 0;
fail:
	kfree(xendev);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_node);

static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
{
	int err = 0;
	char **dir;
	unsigned int dir_n = 0;
	int i;

	dir = xenbus_directory(XBT_NIL, bus->root, type, &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = bus->probe(bus, type, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}

int xenbus_probe_devices(struct xen_bus_type *bus)
{
	int err = 0;
	char **dir;
	unsigned int i, dir_n;

	dir = xenbus_directory(XBT_NIL, bus->root, "", &dir_n);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	for (i = 0; i < dir_n; i++) {
		err = xenbus_probe_device_type(bus, dir[i]);
		if (err)
			break;
	}

	kfree(dir);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_probe_devices);
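
/*
 * Illustrative note, not part of the original file: for the frontend bus
 * bus->root is "device", so xenbus_probe_devices() enumerates a xenstore
 * tree such as (types and ids vary per guest):
 *
 *	device/vif/0		one network frontend
 *	device/vbd/51712	one block frontend
 *
 * Each <root>/<type>/<id> directory is handed to bus->probe(), which ends
 * up in xenbus_probe_node() above.
 */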

static unsigned int char_count(const char *str, char c)
{
	unsigned int i, ret = 0;

	for (i = 0; str[i]; i++)
		if (str[i] == c)
			ret++;
	return ret;
}

static int strsep_len(const char *str, char c, unsigned int len)
{
	unsigned int i;

	for (i = 0; str[i]; i++)
		if (str[i] == c) {
			if (len == 0)
				return i;
			len--;
		}
	return (len == 0) ? i : -ERANGE;
}
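
/*
 * Illustrative note, not part of the original file: strsep_len() returns
 * the length of the prefix made up of the first len '/'-separated
 * components plus whatever follows them up to the next '/', i.e. the
 * offset of the (len+1)-th '/' or the string length if the separators run
 * out exactly, and -ERANGE if the string has fewer than len separators.
 * For example, with len == 2 (the frontend bus depth):
 *
 *	strsep_len("device/vif/0", '/', 2)		returns 12 (strlen)
 *	strsep_len("device/vif/0/state", '/', 2)	returns 12
 *	strsep_len("device", '/', 2)			returns -ERANGE
 *
 * xenbus_dev_changed() below uses this to truncate a changed path back to
 * its bus->levels-deep device root, e.g. "device/vif/0".
 */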

void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
{
	int exists, rootlen;
	struct xenbus_device *dev;
	char type[XEN_BUS_ID_SIZE];
	const char *p, *root;

	if (char_count(node, '/') < 2)
		return;

	exists = xenbus_exists(XBT_NIL, node, "");
	if (!exists) {
		xenbus_cleanup_devices(node, &bus->bus);
		return;
	}

	/* backend/<type>/... or device/<type>/... */
	p = strchr(node, '/') + 1;
	snprintf(type, XEN_BUS_ID_SIZE, "%.*s", (int)strcspn(p, "/"), p);
	type[XEN_BUS_ID_SIZE-1] = '\0';

	rootlen = strsep_len(node, '/', bus->levels);
	if (rootlen < 0)
		return;
	root = kasprintf(GFP_KERNEL, "%.*s", rootlen, node);
	if (!root)
		return;

	dev = xenbus_device_find(root, &bus->bus);
	if (!dev)
		xenbus_probe_node(bus, type, root);
	else
		put_device(&dev->dev);

	kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);

int xenbus_dev_suspend(struct device *dev)
{
	int err = 0;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	if (drv->suspend)
		err = drv->suspend(xdev);
	if (err)
		dev_warn(dev, "suspend failed: %i\n", err);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_suspend);

int xenbus_dev_resume(struct device *dev)
{
	int err;
	struct xenbus_driver *drv;
	struct xenbus_device *xdev
		= container_of(dev, struct xenbus_device, dev);

	DPRINTK("%s", xdev->nodename);

	if (dev->driver == NULL)
		return 0;
	drv = to_xenbus_driver(dev->driver);
	err = talk_to_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (talk_to_otherend) failed: %i\n", err);
		return err;
	}

	xdev->state = XenbusStateInitialising;

	if (drv->resume) {
		err = drv->resume(xdev);
		if (err) {
			dev_warn(dev, "resume failed: %i\n", err);
			return err;
		}
	}

	err = watch_otherend(xdev);
	if (err) {
		dev_warn(dev, "resume (watch_otherend) failed: %d\n", err);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_resume);

int xenbus_dev_cancel(struct device *dev)
{
	/* Do nothing */
	DPRINTK("cancel");
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_dev_cancel);

/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready;


int register_xenstore_notifier(struct notifier_block *nb)
{
	int ret = 0;

	if (xenstored_ready > 0)
		ret = nb->notifier_call(nb, 0, NULL);
	else
		blocking_notifier_chain_register(&xenstore_chain, nb);

	return ret;
}
EXPORT_SYMBOL_GPL(register_xenstore_notifier);

void unregister_xenstore_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&xenstore_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
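
/*
 * Illustrative sketch, not part of the original file: code that must wait
 * until xenstore is usable registers a notifier block; the callback runs
 * immediately when xenstored_ready is already set, otherwise it is invoked
 * from xenbus_probe() via the chain above. The example_* names below are
 * hypothetical.
 *
 *	static int example_xenstore_ready(struct notifier_block *nb,
 *					  unsigned long event, void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_xenstore_nb = {
 *		.notifier_call = example_xenstore_ready,
 *	};
 *
 * The caller registers it with
 * register_xenstore_notifier(&example_xenstore_nb) during init; from the
 * callback onwards it is safe to talk to xenstore.
 */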

static void xenbus_probe(void)
{
	xenstored_ready = 1;

	/*
	 * In the HVM case, xenbus_init() deferred its call to
	 * xs_init() in case callbacks were not operational yet.
	 * So do it now.
	 */
	if (xen_store_domain_type == XS_HVM)
		xs_init();

	/* Notify others that xenstore is up */
	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
}

/*
 * Returns true when XenStore init must be deferred in order to
 * allow the PCI platform device to be initialised, before we
 * can actually have event channel interrupts working.
 */
static bool xs_hvm_defer_init_for_callback(void)
{
#ifdef CONFIG_XEN_PVHVM
	return xen_store_domain_type == XS_HVM &&
		!xen_have_vector_callback;
#else
	return false;
#endif
}

static int xenbus_probe_thread(void *unused)
{
	DEFINE_WAIT(w);

	/*
	 * We actually just want to wait for *any* trigger of xb_waitq,
	 * and run xenbus_probe() the moment it occurs.
	 */
	prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
	schedule();
	finish_wait(&xb_waitq, &w);

	DPRINTK("probing");
	xenbus_probe();
	return 0;
}

static int __init xenbus_probe_initcall(void)
{
	/*
	 * Probe XenBus here in the XS_PV case, and also XS_HVM unless we
	 * need to wait for the platform PCI device to come up.
	 */
	if (xen_store_domain_type == XS_PV ||
	    (xen_store_domain_type == XS_HVM &&
	     !xs_hvm_defer_init_for_callback()))
		xenbus_probe();

	/*
	 * For XS_LOCAL, spawn a thread which will wait for xenstored
	 * or a xenstore-stubdom to be started, then probe. It will be
	 * triggered when communication starts happening, by waiting
	 * on xb_waitq.
	 */
	if (xen_store_domain_type == XS_LOCAL) {
		struct task_struct *probe_task;

		probe_task = kthread_run(xenbus_probe_thread, NULL,
					 "xenbus_probe");
		if (IS_ERR(probe_task))
			return PTR_ERR(probe_task);
	}
	return 0;
}
device_initcall(xenbus_probe_initcall);

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	int ret;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;

	ret = HYPERVISOR_hvm_op(HVMOP_set_param, &a);
	if (ret)
		return ret;

	/*
	 * If xenbus_probe_initcall() deferred the xenbus_probe()
	 * due to the callback not functioning yet, we can do it now.
	 */
	if (!xenstored_ready && xs_hvm_defer_init_for_callback())
		xenbus_probe();

	return ret;
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

/* Set up event channel for xenstored which is run as a local process
 * (this is normally used only in dom0)
 */
static int __init xenstored_local_init(void)
{
	int err = -ENOMEM;
	unsigned long page = 0;
	struct evtchn_alloc_unbound alloc_unbound;

	/* Allocate Xenstore page */
	page = get_zeroed_page(GFP_KERNEL);
	if (!page)
		goto out_err;

	xen_store_gfn = virt_to_gfn((void *)page);

	/* Next allocate a local port which xenstored can bind to */
	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = DOMID_SELF;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err == -ENOSYS)
		goto out_err;

	BUG_ON(err);
	xen_store_evtchn = alloc_unbound.port;

	return 0;

 out_err:
	if (page != 0)
		free_page(page);
	return err;
}

static int xenbus_resume_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	int err = 0;

	if (xen_hvm_domain()) {
		uint64_t v = 0;

		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (!err && v)
			xen_store_evtchn = v;
		else
			pr_warn("Cannot update xenstore event channel: %d\n",
				err);
	} else
		xen_store_evtchn = xen_start_info->store_evtchn;

	return err;
}

static struct notifier_block xenbus_resume_nb = {
	.notifier_call = xenbus_resume_cb,
};

static int __init xenbus_init(void)
{
	int err;
	uint64_t v = 0;

	xen_store_domain_type = XS_UNKNOWN;

	if (!xen_domain())
		return -ENODEV;

	xenbus_ring_ops_init();

	if (xen_pv_domain())
		xen_store_domain_type = XS_PV;
	if (xen_hvm_domain())
		xen_store_domain_type = XS_HVM;
	if (xen_hvm_domain() && xen_initial_domain())
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && !xen_start_info->store_evtchn)
		xen_store_domain_type = XS_LOCAL;
	if (xen_pv_domain() && xen_start_info->store_evtchn)
		xenstored_ready = 1;

	switch (xen_store_domain_type) {
	case XS_LOCAL:
		err = xenstored_local_init();
		if (err)
			goto out_error;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_PV:
		xen_store_evtchn = xen_start_info->store_evtchn;
		xen_store_gfn = xen_start_info->store_mfn;
		xen_store_interface = gfn_to_virt(xen_store_gfn);
		break;
	case XS_HVM:
		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
		if (err)
			goto out_error;
		xen_store_evtchn = (int)v;
		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
		if (err)
			goto out_error;
		/*
		 * Uninitialized hvm_params are zero and return no error.
		 * Although it is theoretically possible to have
		 * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
		 * not zero when valid. If zero, it means that Xenstore hasn't
		 * been properly initialized. Instead of attempting to map a
		 * wrong guest physical address return error.
		 *
		 * Also recognize all bits set as an invalid value.
		 */
		if (!v || !~v) {
			err = -ENOENT;
			goto out_error;
		}
		/* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
		if (v > ULONG_MAX) {
			pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
			       __func__, v);
			err = -EINVAL;
			goto out_error;
		}
#endif
		xen_store_gfn = (unsigned long)v;
		xen_store_interface =
			xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
				  XEN_PAGE_SIZE);
		break;
	default:
		pr_warn("Xenstore state unknown\n");
		break;
	}

	/*
	 * HVM domains may not have a functional callback yet. In that
	 * case let xs_init() be called from xenbus_probe(), which will
	 * get invoked at an appropriate time.
	 */
	if (xen_store_domain_type != XS_HVM) {
		err = xs_init();
		if (err) {
			pr_warn("Error initializing xenstore comms: %i\n", err);
			goto out_error;
		}
	}

	if ((xen_store_domain_type != XS_LOCAL) &&
	    (xen_store_domain_type != XS_UNKNOWN))
		xen_resume_notifier_register(&xenbus_resume_nb);

#ifdef CONFIG_XEN_COMPAT_XENFS
	/*
	 * Create xenfs mountpoint in /proc for compatibility with
	 * utilities that expect to find "xenbus" under "/proc/xen".
	 */
	proc_create_mount_point("xen");
#endif
	return 0;

out_error:
	xen_store_domain_type = XS_UNKNOWN;
	return err;
}

postcore_initcall(xenbus_init);

MODULE_LICENSE("GPL");