/******************************************************************************
 * xenbus_xs.c
 *
 * This is the kernel equivalent of the "xs" library. We don't need everything
 * and we use xenbus_comms for communication.
 *
 * Copyright (C) 2005 Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/unistd.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uio.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <asm/xen/hypervisor.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include "xenbus.h"

/*
 * Framework to protect suspend/resume handling against normal Xenstore
 * message handling:
 * During suspend/resume there must be no open transaction and no pending
 * Xenstore request.
 * New watch events happening in this time can be ignored by firing all watches
 * after resume.
 */

/* Lock protecting enter/exit critical region. */
static DEFINE_SPINLOCK(xs_state_lock);
/* Number of users in critical region (protected by xs_state_lock). */
static unsigned int xs_state_users;
/* Suspend handler waiting or already active (protected by xs_state_lock)? */
static int xs_suspend_active;
/* Unique Xenstore request id (protected by xs_state_lock). */
static uint32_t xs_request_id;

/* Wait queue for callers waiting for the critical region to become usable. */
static DECLARE_WAIT_QUEUE_HEAD(xs_state_enter_wq);
/* Wait queue for suspend handling waiting for the critical region to empty. */
static DECLARE_WAIT_QUEUE_HEAD(xs_state_exit_wq);

/* List of registered watches, and a lock to protect it. */
static LIST_HEAD(watches);
static DEFINE_SPINLOCK(watches_lock);

/* List of pending watch callback events, and a lock to protect it. */
static LIST_HEAD(watch_events);
static DEFINE_SPINLOCK(watch_events_lock);

/* Protect watch (de)register against save/restore. */
static DECLARE_RWSEM(xs_watch_rwsem);

/*
 * Details of the xenwatch callback kernel thread. The thread waits on the
 * watch_events_waitq for work to do (queued on the watch_events list). When
 * it wakes up it acquires the xenwatch_mutex before reading the list and
 * carrying out work.
 */
static pid_t xenwatch_pid;
static DEFINE_MUTEX(xenwatch_mutex);
static DECLARE_WAIT_QUEUE_HEAD(watch_events_waitq);

static void xs_suspend_enter(void)
{
	spin_lock(&xs_state_lock);
	xs_suspend_active++;
	spin_unlock(&xs_state_lock);
	wait_event(xs_state_exit_wq, xs_state_users == 0);
}

static void xs_suspend_exit(void)
{
	xb_dev_generation_id++;
	spin_lock(&xs_state_lock);
	xs_suspend_active--;
	spin_unlock(&xs_state_lock);
	wake_up_all(&xs_state_enter_wq);
}

static uint32_t xs_request_enter(struct xb_req_data *req)
{
	uint32_t rq_id;

	req->type = req->msg.type;

	spin_lock(&xs_state_lock);

	while (!xs_state_users && xs_suspend_active) {
		spin_unlock(&xs_state_lock);
		wait_event(xs_state_enter_wq, xs_suspend_active == 0);
		spin_lock(&xs_state_lock);
	}

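	/*
	 * Starting a transaction takes an extra reference on the critical
	 * region: it stays held until the matching XS_TRANSACTION_END is
	 * accounted for in xs_request_exit().
	 */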
	if (req->type == XS_TRANSACTION_START && !req->user_req)
		xs_state_users++;
	xs_state_users++;
	rq_id = xs_request_id++;

	spin_unlock(&xs_state_lock);

	return rq_id;
}

void xs_request_exit(struct xb_req_data *req)
{
	spin_lock(&xs_state_lock);
	xs_state_users--;
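	/*
	 * Drop the transaction reference taken in xs_request_enter() when
	 * a transaction failed to start or was ended. An ENOENT reply to
	 * XS_TRANSACTION_END means the transaction no longer existed, so
	 * no reference is dropped for it (WARN_ON_ONCE() flags the
	 * imbalance).
	 */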
	if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
	    (req->type == XS_TRANSACTION_END && !req->user_req &&
	     !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
			   !strcmp(req->body, "ENOENT"))))
		xs_state_users--;
	spin_unlock(&xs_state_lock);

	if (xs_suspend_active && !xs_state_users)
		wake_up(&xs_state_exit_wq);
}

static int get_error(const char *errorstring)
{
	unsigned int i;

	for (i = 0; strcmp(errorstring, xsd_errors[i].errstring) != 0; i++) {
		if (i == ARRAY_SIZE(xsd_errors) - 1) {
			pr_warn("xen store gave: unknown error %s\n",
				errorstring);
			return EINVAL;
		}
	}
	return xsd_errors[i].errnum;
}

static bool xenbus_ok(void)
{
	switch (xen_store_domain_type) {
	case XS_LOCAL:
		switch (system_state) {
		case SYSTEM_POWER_OFF:
		case SYSTEM_RESTART:
		case SYSTEM_HALT:
			return false;
		default:
			break;
		}
		return true;
	case XS_PV:
	case XS_HVM:
		/* FIXME: Could check that the remote domain is alive,
		 * but it is normally the initial domain. */
		return true;
	default:
		break;
	}
	return false;
}

static bool test_reply(struct xb_req_data *req)
{
	if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
		/* read req->state before all other fields */
		virt_rmb();
		return true;
	}

	/* Make sure to reread req->state each time. */
	barrier();

	return false;
}

static void *read_reply(struct xb_req_data *req)
{
	do {
		wait_event(req->wq, test_reply(req));

		if (!xenbus_ok())
			/*
			 * If we are in the process of being shut down
			 * there is no point in trying to contact XenBus -
			 * it is either killed (the xenstored application)
			 * or the other domain has been killed or is
			 * unreachable.
			 */
			return ERR_PTR(-EIO);
		if (req->err)
			return ERR_PTR(req->err);

	} while (req->state != xb_req_state_got_reply);

	return req->body;
}

static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
{
	bool notify;

	req->msg = *msg;
	req->err = 0;
	req->state = xb_req_state_queued;
	init_waitqueue_head(&req->wq);

	/* Save the caller req_id and restore it later in the reply */
	req->caller_req_id = req->msg.req_id;
	req->msg.req_id = xs_request_enter(req);

	mutex_lock(&xb_write_mutex);
	list_add_tail(&req->list, &xb_write_list);
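	/* Kick the xenbus thread only if the list was empty before. */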
	notify = list_is_singular(&xb_write_list);
	mutex_unlock(&xb_write_mutex);

	if (notify)
		wake_up(&xb_waitq);
}

static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
{
	void *ret;

	ret = read_reply(req);

	xs_request_exit(req);

	msg->type = req->msg.type;
	msg->len = req->msg.len;

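	/*
	 * If the request is still queued or awaiting its reply it cannot
	 * be freed here; mark it aborted so the response-processing side
	 * releases it once it is done with it.
	 */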
	mutex_lock(&xb_write_mutex);
	if (req->state == xb_req_state_queued ||
	    req->state == xb_req_state_wait_reply)
		req->state = xb_req_state_aborted;
	else
		kfree(req);
	mutex_unlock(&xb_write_mutex);

	return ret;
}

static void xs_wake_up(struct xb_req_data *req)
{
	wake_up(&req->wq);
}

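/*
 * Queue a request on behalf of a userspace client of the xenbus device.
 * The message payload must directly follow the struct xsd_sockmsg header
 * in memory; the reply is delivered asynchronously through
 * xenbus_dev_queue_reply().
 */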
int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
{
	struct xb_req_data *req;
	struct kvec *vec;

	req = kmalloc(sizeof(*req) + sizeof(*vec), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	vec = (struct kvec *)(req + 1);
	vec->iov_len = msg->len;
	vec->iov_base = msg + 1;

	req->vec = vec;
	req->num_vecs = 1;
	req->cb = xenbus_dev_queue_reply;
	req->par = par;
	req->user_req = true;

	xs_send(req, msg);

	return 0;
}
EXPORT_SYMBOL(xenbus_dev_request_and_reply);

/* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */
static void *xs_talkv(struct xenbus_transaction t,
		      enum xsd_sockmsg_type type,
		      const struct kvec *iovec,
		      unsigned int num_vecs,
		      unsigned int *len)
{
	struct xb_req_data *req;
	struct xsd_sockmsg msg;
	void *ret = NULL;
	unsigned int i;
	int err;

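	/*
	 * Note: GFP_NOIO keeps this allocation from recursing into I/O,
	 * since xenstore may be used from memory-reclaim paths (e.g. by
	 * block frontends); __GFP_HIGH allows dipping into reserves.
	 */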
	req = kmalloc(sizeof(*req), GFP_NOIO | __GFP_HIGH);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->vec = iovec;
	req->num_vecs = num_vecs;
	req->cb = xs_wake_up;
	req->user_req = false;

	msg.req_id = 0;
	msg.tx_id = t.id;
	msg.type = type;
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	xs_send(req, &msg);

	ret = xs_wait_for_reply(req, &msg);
	if (len)
		*len = msg.len;

	if (IS_ERR(ret))
		return ret;

	if (msg.type == XS_ERROR) {
		err = get_error(ret);
		kfree(ret);
		return ERR_PTR(-err);
	}

	if (msg.type != type) {
		pr_warn_ratelimited("unexpected type [%d], expected [%d]\n",
				    msg.type, type);
		kfree(ret);
		return ERR_PTR(-EINVAL);
	}
	return ret;
}

/* Simplified version of xs_talkv: single message. */
static void *xs_single(struct xenbus_transaction t,
		       enum xsd_sockmsg_type type,
		       const char *string,
		       unsigned int *len)
{
	struct kvec iovec;

	iovec.iov_base = (void *)string;
	iovec.iov_len = strlen(string) + 1;
	return xs_talkv(t, type, &iovec, 1, len);
}

/* Many commands only need an ack, don't care what it says. */
static int xs_error(char *reply)
{
	if (IS_ERR(reply))
		return PTR_ERR(reply);
	kfree(reply);
	return 0;
}

static unsigned int count_strings(const char *strings, unsigned int len)
{
	unsigned int num;
	const char *p;

	for (p = strings, num = 0; p < strings + len; p += strlen(p) + 1)
		num++;

	return num;
}

/* Return the path to dir with /name appended. Buffer must be kfree()'ed. */
static char *join(const char *dir, const char *name)
{
	char *buffer;

	if (strlen(name) == 0)
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s", dir);
	else
		buffer = kasprintf(GFP_NOIO | __GFP_HIGH, "%s/%s", dir, name);
	return (!buffer) ? ERR_PTR(-ENOMEM) : buffer;
}

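/*
 * Split a block of NUL-separated strings into an array of pointers.
 * The returned buffer holds the pointer array followed by a copy of the
 * string data, so a single kfree() releases everything.
 */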
static char **split(char *strings, unsigned int len, unsigned int *num)
{
	char *p, **ret;

	/* Count the strings. */
	*num = count_strings(strings, len);

	/* Transfer to one big alloc for easy freeing. */
	ret = kmalloc(*num * sizeof(char *) + len, GFP_NOIO | __GFP_HIGH);
	if (!ret) {
		kfree(strings);
		return ERR_PTR(-ENOMEM);
	}
	memcpy(&ret[*num], strings, len);
	kfree(strings);

	strings = (char *)&ret[*num];
	for (p = strings, *num = 0; p < strings + len; p += strlen(p) + 1)
		ret[(*num)++] = p;

	return ret;
}

char **xenbus_directory(struct xenbus_transaction t,
			const char *dir, const char *node, unsigned int *num)
{
	char *strings, *path;
	unsigned int len;

	path = join(dir, node);
	if (IS_ERR(path))
		return (char **)path;

	strings = xs_single(t, XS_DIRECTORY, path, &len);
	kfree(path);
	if (IS_ERR(strings))
		return (char **)strings;

	return split(strings, len, num);
}
EXPORT_SYMBOL_GPL(xenbus_directory);

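/*
 * Example (illustrative; the path below is hypothetical):
 *
 *	unsigned int n, i;
 *	char **dir = xenbus_directory(XBT_NIL, "device", "vif", &n);
 *
 *	if (!IS_ERR(dir)) {
 *		for (i = 0; i < n; i++)
 *			pr_info("vif: %s\n", dir[i]);
 *		kfree(dir);
 *	}
 */
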
/* Check if a path exists. Return 1 if it does. */
int xenbus_exists(struct xenbus_transaction t,
		  const char *dir, const char *node)
{
	char **d;
	unsigned int dir_n;

	d = xenbus_directory(t, dir, node, &dir_n);
	if (IS_ERR(d))
		return 0;
	kfree(d);
	return 1;
}
EXPORT_SYMBOL_GPL(xenbus_exists);

/* Get the value of a single file.
 * Returns a kmalloced value: call kfree() on it after use.
 * len indicates length in bytes.
 */
void *xenbus_read(struct xenbus_transaction t,
		  const char *dir, const char *node, unsigned int *len)
{
	char *path;
	void *ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return (void *)path;

	ret = xs_single(t, XS_READ, path, len);
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_read);

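/*
 * Example (illustrative; path and node are hypothetical):
 *
 *	unsigned int len;
 *	char *val = xenbus_read(XBT_NIL, "device/vbd/51712", "state", &len);
 *
 *	if (!IS_ERR(val)) {
 *		pr_info("state: %s\n", val);
 *		kfree(val);
 *	}
 */
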
/* Write the value of a single file.
 * Returns -err on failure.
 */
int xenbus_write(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *string)
{
	const char *path;
	struct kvec iovec[2];
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	iovec[0].iov_base = (void *)path;
	iovec[0].iov_len = strlen(path) + 1;
	iovec[1].iov_base = (void *)string;
	iovec[1].iov_len = strlen(string);

	ret = xs_error(xs_talkv(t, XS_WRITE, iovec, ARRAY_SIZE(iovec), NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_write);

/* Create a new directory. */
int xenbus_mkdir(struct xenbus_transaction t,
		 const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_MKDIR, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_mkdir);

/* Destroy a file or directory (directories must be empty). */
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node)
{
	char *path;
	int ret;

	path = join(dir, node);
	if (IS_ERR(path))
		return PTR_ERR(path);

	ret = xs_error(xs_single(t, XS_RM, path, NULL));
	kfree(path);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_rm);

/* Start a transaction: changes by others will not be seen during this
 * transaction, and changes will not be visible to others until end.
 */
int xenbus_transaction_start(struct xenbus_transaction *t)
{
	char *id_str;

	id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL);
	if (IS_ERR(id_str))
		return PTR_ERR(id_str);

	t->id = simple_strtoul(id_str, NULL, 0);
	kfree(id_str);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_transaction_start);

/* End a transaction.
 * If abort is non-zero, the transaction is discarded instead of committed.
 */
int xenbus_transaction_end(struct xenbus_transaction t, int abort)
{
	char abortstr[2];

	if (abort)
		strcpy(abortstr, "F");
	else
		strcpy(abortstr, "T");

	return xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL));
}
EXPORT_SYMBOL_GPL(xenbus_transaction_end);

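/*
 * Example of the usual retry pattern (illustrative; the path written
 * below is hypothetical):
 *
 *	struct xenbus_transaction xbt;
 *	int err;
 *
 *	do {
 *		err = xenbus_transaction_start(&xbt);
 *		if (err)
 *			break;
 *		err = xenbus_printf(xbt, "device/vif/0", "state", "%d", 4);
 *		if (err) {
 *			xenbus_transaction_end(xbt, 1);
 *			break;
 *		}
 *		err = xenbus_transaction_end(xbt, 0);
 *	} while (err == -EAGAIN);
 */
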
/* Single read and scanf: returns -errno or num scanned. */
int xenbus_scanf(struct xenbus_transaction t,
		 const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *val;

	val = xenbus_read(t, dir, node, NULL);
	if (IS_ERR(val))
		return PTR_ERR(val);

	va_start(ap, fmt);
	ret = vsscanf(val, fmt, ap);
	va_end(ap);
	kfree(val);
	/* Distinctive errno. */
	if (ret == 0)
		return -ERANGE;
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_scanf);

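/*
 * Example (illustrative; the node name is hypothetical):
 *
 *	unsigned int ring_ref;
 *	int err;
 *
 *	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
 *			   "%u", &ring_ref);
 *	if (err < 0)
 *		return err;
 */
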
/* Read an (optional) unsigned value. */
unsigned int xenbus_read_unsigned(const char *dir, const char *node,
				  unsigned int default_val)
{
	unsigned int val;
	int ret;

	ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val);
	if (ret <= 0)
		val = default_val;

	return val;
}
EXPORT_SYMBOL_GPL(xenbus_read_unsigned);

/* Single printf and write: returns -errno or 0. */
int xenbus_printf(struct xenbus_transaction t,
		  const char *dir, const char *node, const char *fmt, ...)
{
	va_list ap;
	int ret;
	char *buf;

	va_start(ap, fmt);
	buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
	va_end(ap);

	if (!buf)
		return -ENOMEM;

	ret = xenbus_write(t, dir, node, buf);

	kfree(buf);

	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_printf);

/* Takes tuples of names, scanf-style args, and void **, NULL terminated. */
int xenbus_gather(struct xenbus_transaction t, const char *dir, ...)
{
	va_list ap;
	const char *name;
	int ret = 0;

	va_start(ap, dir);
	while (ret == 0 && (name = va_arg(ap, char *)) != NULL) {
		const char *fmt = va_arg(ap, char *);
		void *result = va_arg(ap, void *);
		char *p;

		p = xenbus_read(t, dir, name, NULL);
		if (IS_ERR(p)) {
			ret = PTR_ERR(p);
			break;
		}
		if (fmt) {
			if (sscanf(p, fmt, result) == 0)
				ret = -EINVAL;
			kfree(p);
		} else
			*(char **)result = p;
	}
	va_end(ap);
	return ret;
}
EXPORT_SYMBOL_GPL(xenbus_gather);

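/*
 * Example (illustrative; the node names are hypothetical):
 *
 *	unsigned int ring_ref, evtchn;
 *	int err;
 *
 *	err = xenbus_gather(XBT_NIL, dev->otherend,
 *			    "ring-ref", "%u", &ring_ref,
 *			    "event-channel", "%u", &evtchn,
 *			    NULL);
 */
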
static int xs_watch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (void *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (void *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_WATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static int xs_unwatch(const char *path, const char *token)
{
	struct kvec iov[2];

	iov[0].iov_base = (char *)path;
	iov[0].iov_len = strlen(path) + 1;
	iov[1].iov_base = (char *)token;
	iov[1].iov_len = strlen(token) + 1;

	return xs_error(xs_talkv(XBT_NIL, XS_UNWATCH, iov,
				 ARRAY_SIZE(iov), NULL));
}

static struct xenbus_watch *find_watch(const char *token)
{
	struct xenbus_watch *i, *cmp;

	cmp = (void *)simple_strtoul(token, NULL, 16);

	list_for_each_entry(i, &watches, list)
		if (i == cmp)
			return i;

	return NULL;
}

int xs_watch_msg(struct xs_watch_event *event)
{
	if (count_strings(event->body, event->len) != 2) {
		kfree(event);
		return -EINVAL;
	}
	event->path = (const char *)event->body;
	event->token = (const char *)strchr(event->body, '\0') + 1;

	spin_lock(&watches_lock);
	event->handle = find_watch(event->token);
	if (event->handle != NULL &&
	    (!event->handle->will_handle ||
	     event->handle->will_handle(event->handle,
					event->path, event->token))) {
		spin_lock(&watch_events_lock);
		list_add_tail(&event->list, &watch_events);
		event->handle->nr_pending++;
		wake_up(&watch_events_waitq);
		spin_unlock(&watch_events_lock);
	} else
		kfree(event);
	spin_unlock(&watches_lock);

	return 0;
}

/*
 * Certain older XenBus toolstacks cannot handle reading values that are
 * not populated. Some Xen 3.4 installations are incapable of doing this,
 * so if we are running on anything older than 4 do not attempt to read
 * control/platform-feature-xs_reset_watches.
 */
static bool xen_strict_xenbus_quirk(void)
{
#ifdef CONFIG_X86
	uint32_t eax, ebx, ecx, edx, base;

	base = xen_cpuid_base();
	cpuid(base + 1, &eax, &ebx, &ecx, &edx);

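	/* The Xen major version is in the upper 16 bits of eax. */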
	if ((eax >> 16) < 4)
		return true;
#endif
	return false;
}

static void xs_reset_watches(void)
{
	int err;

	if (!xen_hvm_domain() || xen_initial_domain())
		return;

	if (xen_strict_xenbus_quirk())
		return;

	if (!xenbus_read_unsigned("control",
				  "platform-feature-xs_reset_watches", 0))
		return;

	err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
	if (err && err != -EEXIST)
		pr_warn("xs_reset_watches failed: %d\n", err);
}

/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	watch->nr_pending = 0;

	down_read(&xs_watch_rwsem);

	spin_lock(&watches_lock);
	BUG_ON(find_watch(token));
	list_add(&watch->list, &watches);
	spin_unlock(&watches_lock);

	err = xs_watch(watch->node, token);

	if (err) {
		spin_lock(&watches_lock);
		list_del(&watch->list);
		spin_unlock(&watches_lock);
	}

	up_read(&xs_watch_rwsem);

	return err;
}
EXPORT_SYMBOL_GPL(register_xenbus_watch);

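/*
 * Example (illustrative; the node being watched is hypothetical):
 *
 *	static void my_callback(struct xenbus_watch *watch,
 *				const char *path, const char *token)
 *	{
 *		pr_info("%s changed\n", path);
 *	}
 *
 *	static struct xenbus_watch my_watch = {
 *		.node = "device/vif/0/state",
 *		.callback = my_callback,
 *	};
 *
 *	err = register_xenbus_watch(&my_watch);
 */
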
void unregister_xenbus_watch(struct xenbus_watch *watch)
{
	struct xs_watch_event *event, *tmp;
	char token[sizeof(watch) * 2 + 1];
	int err;

	sprintf(token, "%lX", (long)watch);

	down_read(&xs_watch_rwsem);

	spin_lock(&watches_lock);
	BUG_ON(!find_watch(token));
	list_del(&watch->list);
	spin_unlock(&watches_lock);

	err = xs_unwatch(watch->node, token);
	if (err)
		pr_warn("Failed to release watch %s: %i\n", watch->node, err);

	up_read(&xs_watch_rwsem);

	/* Make sure there are no callbacks running currently (unless
	   it's us) */
	if (current->pid != xenwatch_pid)
		mutex_lock(&xenwatch_mutex);

	/* Cancel pending watch events. */
	spin_lock(&watch_events_lock);
	if (watch->nr_pending) {
		list_for_each_entry_safe(event, tmp, &watch_events, list) {
			if (event->handle != watch)
				continue;
			list_del(&event->list);
			kfree(event);
		}
		watch->nr_pending = 0;
	}
	spin_unlock(&watch_events_lock);

	if (current->pid != xenwatch_pid)
		mutex_unlock(&xenwatch_mutex);
}
EXPORT_SYMBOL_GPL(unregister_xenbus_watch);

void xs_suspend(void)
{
	xs_suspend_enter();

	down_write(&xs_watch_rwsem);
	mutex_lock(&xs_response_mutex);
}

void xs_resume(void)
{
	struct xenbus_watch *watch;
	char token[sizeof(watch) * 2 + 1];

	xb_init_comms();

	mutex_unlock(&xs_response_mutex);

	xs_suspend_exit();

	/* No need for watches_lock: the xs_watch_rwsem is sufficient. */
	list_for_each_entry(watch, &watches, list) {
		sprintf(token, "%lX", (long)watch);
		xs_watch(watch->node, token);
	}

	up_write(&xs_watch_rwsem);
}

void xs_suspend_cancel(void)
{
	mutex_unlock(&xs_response_mutex);
	up_write(&xs_watch_rwsem);

	xs_suspend_exit();
}

static int xenwatch_thread(void *unused)
{
	struct xs_watch_event *event;

	xenwatch_pid = current->pid;

	for (;;) {
		wait_event_interruptible(watch_events_waitq,
					 !list_empty(&watch_events));

		if (kthread_should_stop())
			break;

		mutex_lock(&xenwatch_mutex);

		spin_lock(&watch_events_lock);
		event = list_first_entry_or_null(&watch_events,
						 struct xs_watch_event, list);
		if (event) {
			list_del(&event->list);
			event->handle->nr_pending--;
		}
		spin_unlock(&watch_events_lock);

		if (event) {
			event->handle->callback(event->handle, event->path,
						event->token);
			kfree(event);
		}

		mutex_unlock(&xenwatch_mutex);
	}

	return 0;
}

/*
 * Wake up all threads waiting for a xenstore reply. In case of shutdown all
 * pending replies will be marked as "aborted" in order to let the waiters
 * return in spite of xenstore possibly no longer being able to reply. This
 * avoids shutdown being blocked by a thread that is waiting for xenstore
 * but is itself needed for shutdown processing to proceed.
 */
static int xs_reboot_notify(struct notifier_block *nb,
			    unsigned long code, void *unused)
{
	struct xb_req_data *req;

	mutex_lock(&xb_write_mutex);
	list_for_each_entry(req, &xs_reply_list, list)
		wake_up(&req->wq);
	list_for_each_entry(req, &xb_write_list, list)
		wake_up(&req->wq);
	mutex_unlock(&xb_write_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block xs_reboot_nb = {
	.notifier_call = xs_reboot_notify,
};

int xs_init(void)
{
	int err;
	struct task_struct *task;

	register_reboot_notifier(&xs_reboot_nb);

	/* Initialize the shared memory rings to talk to xenstored */
	err = xb_init_comms();
	if (err)
		return err;

	task = kthread_run(xenwatch_thread, NULL, "xenwatch");
	if (IS_ERR(task))
		return PTR_ERR(task);

	/* shutdown watches for kexec boot */
	xs_reset_watches();

	return 0;
}