// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

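/*
 * Global state: the list of all connected frontends, protected by a
 * semaphore so that connect/disconnect paths may sleep while holding it.
 */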
static struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

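/*
 * Per-active-socket I/O worker: all reads and writes for one connected
 * socket are serialized on its own single-threaded workqueue.
 */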
struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

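/*
 * State of one active (connected) socket: the grant-mapped indexes page
 * ("ring") and data buffers ("bytes"), the per-connection event channel
 * irq, and the atomic counters used to hand work over to the ioworker.
 */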
struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

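/*
 * State of one passive (listening) socket. "reqcopy" holds the single
 * in-flight accept or poll request; copy_lock protects it against the
 * data_ready callback running concurrently with the command handlers.
 */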
struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

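/*
 * Move bytes from the kernel socket's receive queue into the shared "in"
 * data ring. Returns true if any work was done, which is used to decide
 * whether the triggering event was spurious for lateeoi purposes.
 */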
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				       flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

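	/*
	 * The free region of the "in" ring starts at masked_prod and ends
	 * at masked_cons. If it is contiguous, one kvec suffices; if it
	 * wraps past the end of the buffer, split it into two kvecs.
	 */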
	memset(&msg, 0, sizeof(msg));
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

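/*
 * Drain the shared "out" data ring into the kernel socket. Returns true
 * if any work was done; on -EAGAIN, or on a partial send that leaves
 * data queued, the worker is rescheduled by bumping write/io again.
 */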
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
	}

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

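/*
 * Main loop of the per-socket worker. The io counter is incremented by
 * every event source (connection irq, data_ready, accept), so the loop
 * keeps running until all queued work has been consumed; the event
 * channel is acked with xen_irq_lateeoi() only once writes have drained.
 */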
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

static int pvcalls_back_socket(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

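/*
 * Socket callbacks, installed under sk_callback_lock. They run in
 * atomic context, so they never touch the rings directly: a state
 * change pokes the frontend's event channel, while incoming data
 * bumps the counters and kicks the ioworker.
 */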
static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

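/*
 * Set up a newly connected or accepted socket: map the indexes page,
 * read the ring order from it, map the data ring itself, bind the
 * per-connection event channel and install the socket callbacks.
 */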
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev->otherend_id, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

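/*
 * Deferred half of accept: runs from the passive socket's workqueue,
 * either because a PVCALLS_ACCEPT request was just queued or because
 * data_ready fired on the listening socket while one was pending.
 */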
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

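/*
 * Create a passive socket and bind it. The sockpass_mapping is indexed
 * by the frontend-chosen id in the radix tree, where accept and poll
 * later look it up.
 */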
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

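/*
 * Consume all pending requests from the command ring. Handlers that
 * return nonzero (accept and poll) defer their response to a later
 * callback, so no response is pushed for them here; everything else
 * is answered in place.
 */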
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

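/*
 * Called when the frontend moves to Connected: read the command ring
 * reference and event channel port from the frontend's xenstore
 * directory, bind a lateeoi irq, map the ring and register this
 * frontend in the global list.
 */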
static int backend_connect(struct xenbus_device *dev)
{
	int err;
	evtchn_port_t evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

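/*
 * Walk the xenbus state machine one legal transition at a time until
 * the requested state is reached; any other transition is a bug.
 */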
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");