// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual sound device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>

#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"

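/*
 * Interrupt handler for the request channel: consume responses queued by
 * the backend and complete the corresponding waiting requests.
 */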
static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xen_snd_front_info *front_info = channel->front_info;
	struct xensnd_resp *resp;
	RING_IDX i, rp;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

again:
	rp = channel->u.req.ring.sring->rsp_prod;
	/* Ensure we see queued responses up to rp. */
	rmb();

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (i = channel->u.req.ring.rsp_cons; i != rp; i++) {
		resp = RING_GET_RESPONSE(&channel->u.req.ring, i);
		if (resp->id != channel->evt_id)
			continue;
		switch (resp->operation) {
		case XENSND_OP_OPEN:
		case XENSND_OP_CLOSE:
		case XENSND_OP_READ:
		case XENSND_OP_WRITE:
		case XENSND_OP_TRIGGER:
			channel->u.req.resp_status = resp->status;
			complete(&channel->u.req.completion);
			break;
		case XENSND_OP_HW_PARAM_QUERY:
			channel->u.req.resp_status = resp->status;
			channel->u.req.resp.hw_param =
					resp->resp.hw_param;
			complete(&channel->u.req.completion);
			break;

		default:
			dev_err(&front_info->xb_dev->dev,
				"Operation %d is not supported\n",
				resp->operation);
			break;
		}
	}

	channel->u.req.ring.rsp_cons = i;
	if (i != channel->u.req.ring.req_prod_pvt) {
		int more_to_do;

		RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring,
					       more_to_do);
		if (more_to_do)
			goto again;
	} else {
		channel->u.req.ring.sring->rsp_event = i + 1;
	}

	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

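/*
 * Interrupt handler for the event channel: deliver backend events, e.g.
 * current playback/capture position updates, to the ALSA layer.
 */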
static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
{
	struct xen_snd_front_evtchnl *channel = dev_id;
	struct xensnd_event_page *page = channel->u.evt.page;
	u32 cons, prod;

	if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED))
		return IRQ_HANDLED;

	mutex_lock(&channel->ring_io_lock);

	prod = page->in_prod;
	/* Ensure we see ring contents up to prod. */
	virt_rmb();
	if (prod == page->in_cons)
		goto out;

	/*
	 * Assume that the backend is trusted to always write sane values
	 * to the ring counters, so no overflow checks on the frontend side
	 * are required.
	 */
	for (cons = page->in_cons; cons != prod; cons++) {
		struct xensnd_evt *event;

		event = &XENSND_IN_RING_REF(page, cons);
		if (unlikely(event->id != channel->evt_id++))
			continue;

		switch (event->type) {
		case XENSND_EVT_CUR_POS:
			xen_snd_front_alsa_handle_cur_pos(channel,
							  event->op.cur_pos.position);
			break;
		}
	}

	page->in_cons = cons;
	/* Ensure the updated in_cons is visible to the backend. */
	virt_wmb();

out:
	mutex_unlock(&channel->ring_io_lock);
	return IRQ_HANDLED;
}

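/* Push the queued request to the backend and notify it if required. */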
void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel)
{
	int notify;

	channel->u.req.ring.req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify);
	if (notify)
		notify_remote_via_irq(channel->irq);
}

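/*
 * Tear down a single channel: wake up pending waiters, unbind the IRQ,
 * free the event channel and release the shared page.
 */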
static void evtchnl_free(struct xen_snd_front_info *front_info,
			 struct xen_snd_front_evtchnl *channel)
{
	unsigned long page = 0;

	if (channel->type == EVTCHNL_TYPE_REQ)
		page = (unsigned long)channel->u.req.ring.sring;
	else if (channel->type == EVTCHNL_TYPE_EVT)
		page = (unsigned long)channel->u.evt.page;

	if (!page)
		return;

	channel->state = EVTCHNL_STATE_DISCONNECTED;
	if (channel->type == EVTCHNL_TYPE_REQ) {
		/* Release everyone still waiting for a response, if any. */
		channel->u.req.resp_status = -EIO;
		complete_all(&channel->u.req.completion);
	}

	if (channel->irq)
		unbind_from_irqhandler(channel->irq, channel);

	if (channel->port)
		xenbus_free_evtchn(front_info->xb_dev, channel->port);

	/* End access and free the page. */
	if (channel->gref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(channel->gref, 0, page);
	else
		free_page(page);

	memset(channel, 0, sizeof(*channel));
}

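/* Free both channels of every allocated request/event pair. */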
void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info)
{
	int i;

	if (!front_info->evt_pairs)
		return;

	for (i = 0; i < front_info->num_evt_pairs; i++) {
		evtchnl_free(front_info, &front_info->evt_pairs[i].req);
		evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
	}

	kfree(front_info->evt_pairs);
	front_info->evt_pairs = NULL;
}

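/*
 * Allocate a shared page, grant the backend access to it and bind an
 * interrupt handler to a freshly allocated event channel.
 */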
static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index,
			 struct xen_snd_front_evtchnl *channel,
			 enum xen_snd_front_evtchnl_type type)
{
	struct xenbus_device *xb_dev = front_info->xb_dev;
	unsigned long page;
	grant_ref_t gref;
	irq_handler_t handler;
	char *handler_name = NULL;
	int ret;

	memset(channel, 0, sizeof(*channel));
	channel->type = type;
	channel->index = index;
	channel->front_info = front_info;
	channel->state = EVTCHNL_STATE_DISCONNECTED;
	channel->gref = GRANT_INVALID_REF;
	page = get_zeroed_page(GFP_KERNEL);
	if (!page) {
		ret = -ENOMEM;
		goto fail;
	}

	handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME,
				 type == EVTCHNL_TYPE_REQ ?
				 XENSND_FIELD_RING_REF :
				 XENSND_FIELD_EVT_RING_REF);
	if (!handler_name) {
		ret = -ENOMEM;
		goto fail;
	}

	mutex_init(&channel->ring_io_lock);

	if (type == EVTCHNL_TYPE_REQ) {
		struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page;

		init_completion(&channel->u.req.completion);
		mutex_init(&channel->u.req.req_io_lock);
		SHARED_RING_INIT(sring);
		FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE);

		ret = xenbus_grant_ring(xb_dev, sring, 1, &gref);
		if (ret < 0) {
			channel->u.req.ring.sring = NULL;
			goto fail;
		}

		handler = evtchnl_interrupt_req;
	} else {
		ret = gnttab_grant_foreign_access(xb_dev->otherend_id,
						  virt_to_gfn((void *)page), 0);
		if (ret < 0)
			goto fail;

		channel->u.evt.page = (struct xensnd_event_page *)page;
		gref = ret;
		handler = evtchnl_interrupt_evt;
	}

	channel->gref = gref;

	ret = xenbus_alloc_evtchn(xb_dev, &channel->port);
	if (ret < 0)
		goto fail;

	ret = bind_evtchn_to_irq(channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev,
			"Failed to bind IRQ for domid %d port %d: %d\n",
			front_info->xb_dev->otherend_id, channel->port, ret);
		goto fail;
	}

	channel->irq = ret;

	ret = request_threaded_irq(channel->irq, NULL, handler,
				   IRQF_ONESHOT, handler_name, channel);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n",
			channel->irq, ret);
		goto fail;
	}

	kfree(handler_name);
	return 0;

fail:
	if (page)
		free_page(page);
	kfree(handler_name);
	dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret);
	return ret;
}

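/*
 * Create a request/event channel pair for every playback and capture
 * stream of every configured PCM instance.
 */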
int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info,
				     int num_streams)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct device *dev = &front_info->xb_dev->dev;
	int d, ret = 0;

	front_info->evt_pairs =
			kcalloc(num_streams,
				sizeof(struct xen_snd_front_evtchnl_pair),
				GFP_KERNEL);
	if (!front_info->evt_pairs)
		return -ENOMEM;

	/* Iterate over devices and their streams and create event channels. */
	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].req,
					    EVTCHNL_TYPE_REQ);
			if (ret < 0) {
				dev_err(dev, "Error allocating control channel\n");
				goto fail;
			}

			ret = evtchnl_alloc(front_info, index,
					    &front_info->evt_pairs[index].evt,
					    EVTCHNL_TYPE_EVT);
			if (ret < 0) {
				dev_err(dev, "Error allocating in-event channel\n");
				goto fail;
			}
		}
	}

	front_info->num_evt_pairs = num_streams;
	return 0;

fail:
	xen_snd_front_evtchnl_free_all(front_info);
	return ret;
}

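/* Publish the ring reference and event channel of one channel in XenStore. */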
static int evtchnl_publish(struct xenbus_transaction xbt,
			   struct xen_snd_front_evtchnl *channel,
			   const char *path, const char *node_ring,
			   const char *node_chnl)
{
	struct xenbus_device *xb_dev = channel->front_info->xb_dev;
	int ret;

	/* Write the ring reference. */
	ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret);
		return ret;
	}

	/* Write the event channel number. */
	ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port);
	if (ret < 0) {
		dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret);
		return ret;
	}

	return 0;
}

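/*
 * Publish all ring references and event channels in a single XenStore
 * transaction, retrying if the transaction must be restarted.
 */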
int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info)
{
	struct xen_front_cfg_card *cfg = &front_info->cfg;
	struct xenbus_transaction xbt;
	int ret, d;

again:
	ret = xenbus_transaction_start(&xbt);
	if (ret < 0) {
		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "starting transaction");
		return ret;
	}

	for (d = 0; d < cfg->num_pcm_instances; d++) {
		struct xen_front_cfg_pcm_instance *pcm_instance;
		int s, index;

		pcm_instance = &cfg->pcm_instances[d];

		for (s = 0; s < pcm_instance->num_streams_pb; s++) {
			index = pcm_instance->streams_pb[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_pb[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}

		for (s = 0; s < pcm_instance->num_streams_cap; s++) {
			index = pcm_instance->streams_cap[s].index;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].req,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_RING_REF,
					      XENSND_FIELD_EVT_CHNL);
			if (ret < 0)
				goto fail;

			ret = evtchnl_publish(xbt,
					      &front_info->evt_pairs[index].evt,
					      pcm_instance->streams_cap[s].xenstore_path,
					      XENSND_FIELD_EVT_RING_REF,
					      XENSND_FIELD_EVT_EVT_CHNL);
			if (ret < 0)
				goto fail;
		}
	}
	ret = xenbus_transaction_end(xbt, 0);
	if (ret < 0) {
		if (ret == -EAGAIN)
			goto again;

		xenbus_dev_fatal(front_info->xb_dev, ret,
				 "completing transaction");
		goto fail_to_end;
	}
	return 0;
fail:
	xenbus_transaction_end(xbt, 1);
fail_to_end:
	xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore");
	return ret;
}

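/* Mark both channels of a pair as (dis)connected under their ring locks. */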
void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair,
					      bool is_connected)
{
	enum xen_snd_front_evtchnl_state state;

	if (is_connected)
		state = EVTCHNL_STATE_CONNECTED;
	else
		state = EVTCHNL_STATE_DISCONNECTED;

	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.state = state;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.state = state;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}

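/* Reset the next request/event id counters of both channels of a pair. */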
void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair)
{
	mutex_lock(&evt_pair->req.ring_io_lock);
	evt_pair->req.evt_next_id = 0;
	mutex_unlock(&evt_pair->req.ring_io_lock);

	mutex_lock(&evt_pair->evt.ring_io_lock);
	evt_pair->evt.evt_next_id = 0;
	mutex_unlock(&evt_pair->evt.ring_io_lock);
}