/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus.h"

#define XENBUS_PAGES(_grants)	(DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))

#define XENBUS_MAX_RING_PAGES	(XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			unsigned long addrs[XENBUS_MAX_RING_GRANTS];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_GRANTS];
	unsigned int nr_handles;
};

struct map_ring_valloc {
	struct xenbus_map_node *node;

	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];

	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];

	unsigned int idx;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[XenbusStateUnknown]       = "Unknown",
		[XenbusStateInitialising]  = "Initialising",
		[XenbusStateInitWait]      = "InitWait",
		[XenbusStateInitialised]   = "Initialised",
		[XenbusStateConnected]     = "Connected",
		[XenbusStateClosing]       = "Closing",
		[XenbusStateClosed]        = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured]  = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @will_handle: optional callback, invoked before an event for this watch is
 *		 queued, that may reject the event (may be NULL)
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      bool (*will_handle)(struct xenbus_watch *,
					  const char *, const char *),
		      void (*callback)(struct xenbus_watch *,
				       const char *, const char *))
{
	int err;

	watch->node = path;
	watch->will_handle = will_handle;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->will_handle = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
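
/*
 * Example (a minimal sketch, not taken from an in-tree driver): registering
 * a watch on a node whose path has already been built.  "my_watch",
 * "my_callback" and "path" are hypothetical names.
 *
 *	path = kasprintf(GFP_KERNEL, "%s/state", dev->otherend);
 *	if (!path)
 *		return -ENOMEM;
 *	err = xenbus_watch_path(dev, path, &my_watch, NULL, my_callback);
 *	if (err)
 *		kfree(path);
 *
 * On success the string is saved as my_watch.node and remains the caller's
 * to free once the watch has been unregistered.
 */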

/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @will_handle: optional callback, invoked before an event for this watch is
 *		 queued, that may reject the event (may be NULL)
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path constructed from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as
 * the callback.  Return 0 on success, or -errno on error.  On success, the
 * watched path (generated from @pathfmt) will be saved as @watch->node, and
 * becomes the caller's to kfree().  On error, watch->node will be NULL, so
 * the caller has nothing to free, the device will switch to
 * %XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 bool (*will_handle)(struct xenbus_watch *,
					     const char *, const char *),
			 void (*callback)(struct xenbus_watch *,
					  const char *, const char *),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, will_handle, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
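
/*
 * Example (a minimal sketch; "be_watch" and "backend_changed" are
 * hypothetical names): watching the other end's "state" node.
 *
 *	err = xenbus_watch_pathfmt(dev, &be_watch, NULL, backend_changed,
 *				   "%s/state", dev->otherend);
 *
 * On success the generated path is saved in be_watch.node and is the
 * caller's to kfree() after the watch is unregistered.
 */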

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to do in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}
EXPORT_SYMBOL_GPL(xenbus_switch_state);
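
/*
 * Example (minimal sketch): a frontend that has finished its setup simply
 * advertises the Connected state; errors are reported to the store by the
 * switch helpers themselves.
 *
 *	xenbus_switch_state(dev, XenbusStateConnected);
 */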

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer;
	char *path_buffer;

#define PRINTF_BUFFER_SIZE 4096

	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (!printf_buffer)
		return;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
	if (path_buffer)
		xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer);

	kfree(printf_buffer);
	kfree(path_buffer);
}

/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);
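
/*
 * Example (minimal sketch): a typical error path in a driver's connect
 * routine; "setup_ring" is a hypothetical helper.
 *
 *	err = setup_ring(dev);
 *	if (err) {
 *		xenbus_dev_fatal(dev, err, "setting up ring");
 *		return err;
 *	}
 */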

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	unsigned int i;
	grant_ref_t gref_head;

	err = gnttab_alloc_grant_references(nr_pages, &gref_head);
	if (err) {
		xenbus_dev_fatal(dev, err, "granting access to ring page");
		return err;
	}

	for (i = 0; i < nr_pages; i++) {
		unsigned long gfn;

		if (is_vmalloc_addr(vaddr))
			gfn = pfn_to_gfn(vmalloc_to_pfn(vaddr));
		else
			gfn = virt_to_gfn(vaddr);

		grefs[i] = gnttab_claim_grant_reference(&gref_head);
		gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id,
						gfn, 0);

		vaddr = vaddr + XEN_PAGE_SIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
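
/*
 * Example (minimal sketch): a frontend sharing a single-page ring with its
 * backend and publishing the grant reference in the store.  Error handling
 * is elided; "ring" and "gref" are hypothetical locals.
 *
 *	void *ring = (void *)get_zeroed_page(GFP_KERNEL);
 *	grant_ref_t gref;
 *
 *	err = xenbus_grant_ring(dev, ring, 1, &gref);
 *	if (!err)
 *		err = xenbus_printf(XBT_NIL, dev->nodename,
 *				    "ring-ref", "%u", gref);
 */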

/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
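
/*
 * Example (minimal sketch): allocating an event channel and advertising the
 * local port to the peer via the store; "evtchn" is a hypothetical local.
 *
 *	evtchn_port_t evtchn;
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (!err)
 *		err = xenbus_printf(XBT_NIL, dev->nodename,
 *				    "event-channel", "%u", evtchn);
 */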

/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %u", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);

/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and -errno on
 * error.  If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	int err;
	struct map_ring_valloc *info;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
	if (!info->node)
		err = -ENOMEM;
	else
		err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);

	kfree(info->node);
	kfree(info);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
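
/*
 * Example (minimal sketch): a backend mapping a single-page ring whose grant
 * reference it has read from the store; "ring_ref" and "ring" are
 * hypothetical locals.  The mapping is released with
 * xenbus_unmap_ring_vfree() during disconnect.
 *
 *	grant_ref_t ring_ref = ...;	read from the frontend's directory
 *	void *ring;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &ring);
 *	if (err)
 *		return err;
 *	...
 *	xenbus_unmap_ring_vfree(dev, ring);
 */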

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  The caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     struct map_ring_valloc *info,
			     unsigned int flags,
			     bool *leaked)
{
	int i, j;

	if (nr_grefs > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
				  gnt_refs[i], dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(info->map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (info->map[i].status != GNTST_okay) {
			xenbus_dev_fatal(dev, info->map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = info->map[i].handle;
	}

	return 0;

fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			gnttab_set_unmap_op(&info->unmap[j],
					    info->phys_addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (info->unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return -ENOENT;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success or a GNTST_* status on error
 * (see xen/include/interface/grant_table.h).
 */
static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
			     unsigned int nr_handles, unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_GRANTS)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}

static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
					    unsigned int goffset,
					    unsigned int len,
					    void *data)
{
	struct map_ring_valloc *info = data;
	unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);

	info->phys_addrs[info->idx] = vaddr;
	info->addrs[info->idx] = vaddr;

	info->idx++;
}

static int xenbus_map_ring_hvm(struct xenbus_device *dev,
			       struct map_ring_valloc *info,
			       grant_ref_t *gnt_ref,
			       unsigned int nr_grefs,
			       void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	int err;
	void *addr;
	bool leaked = false;
	unsigned int nr_pages = XENBUS_PAGES(nr_grefs);

	err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
	if (err)
		goto out_err;

	gnttab_foreach_grant(node->hvm.pages, nr_grefs,
			     xenbus_map_ring_setup_grant_hvm,
			     info);

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				info, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	info->node = NULL;

	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_pages);
 out_free_ballooned_pages:
	if (!leaked)
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
 out_err:
	return err;
}

/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap memory in this domain that was imported from another domain and
 * mapped with xenbus_map_ring_valloc(); the virtual address space is
 * freed as well.
 * Returns 0 on success or a GNTST_* status on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);

#ifdef CONFIG_XEN_PV
static int map_ring_apply(pte_t *pte, unsigned long addr, void *data)
{
	struct map_ring_valloc *info = data;

	info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr;
	return 0;
}

static int xenbus_map_ring_pv(struct xenbus_device *dev,
			      struct map_ring_valloc *info,
			      grant_ref_t *gnt_refs,
			      unsigned int nr_grefs,
			      void **vaddr)
{
	struct xenbus_map_node *node = info->node;
	struct vm_struct *area;
	bool leaked = false;
	int err = -ENOMEM;

	area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP);
	if (!area)
		return -ENOMEM;
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info))
		goto failed;
	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				info, GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	info->node = NULL;

	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	return err;
}

static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_pv,
	.unmap = xenbus_unmap_ring_pv,
};
#endif

struct unmap_ring_hvm
{
	unsigned int idx;
	unsigned long addrs[XENBUS_MAX_RING_GRANTS];
};

static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
					      unsigned int goffset,
					      unsigned int len,
					      void *data)
{
	struct unmap_ring_hvm *info = data;

	info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);

	info->idx++;
}

static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	struct unmap_ring_hvm info = {
		.idx = 0,
	};
	unsigned int nr_pages;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	nr_pages = XENBUS_PAGES(node->nr_handles);

	gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
			     xenbus_unmap_ring_setup_grant_hvm,
			     &info);

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       info.addrs);
	if (!rv) {
		vunmap(vaddr);
		xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
	} else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);

	kfree(node);
	return rv;
}

/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
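
/*
 * Example (minimal sketch): checking the peer's state during teardown.
 *
 *	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed)
 *		xenbus_switch_state(dev, XenbusStateClosing);
 *
 * Most drivers react to changes of the other end's state from their
 * otherend watch rather than reading it ad hoc.
 */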

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_hvm,
	.unmap = xenbus_unmap_ring_hvm,
};

void __init xenbus_ring_ops_init(void)
{
#ifdef CONFIG_XEN_PV
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
#endif
		ring_ops = &ring_ops_hvm;
}