1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Mailbox: Common code for Mailbox controllers and users
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2013-2014 Linaro Ltd.
6*4882a593Smuzhiyun * Author: Jassi Brar <jassisinghbrar@gmail.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/spinlock.h>
11*4882a593Smuzhiyun #include <linux/mutex.h>
12*4882a593Smuzhiyun #include <linux/delay.h>
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/err.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/device.h>
17*4882a593Smuzhiyun #include <linux/bitops.h>
18*4882a593Smuzhiyun #include <linux/mailbox_client.h>
19*4882a593Smuzhiyun #include <linux/mailbox_controller.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #include "mailbox.h"
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun static LIST_HEAD(mbox_cons);
24*4882a593Smuzhiyun static DEFINE_MUTEX(con_mutex);
25*4882a593Smuzhiyun
add_to_rbuf(struct mbox_chan * chan,void * mssg)26*4882a593Smuzhiyun static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun int idx;
29*4882a593Smuzhiyun unsigned long flags;
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun spin_lock_irqsave(&chan->lock, flags);
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun /* See if there is any space left */
34*4882a593Smuzhiyun if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
35*4882a593Smuzhiyun spin_unlock_irqrestore(&chan->lock, flags);
36*4882a593Smuzhiyun return -ENOBUFS;
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun idx = chan->msg_free;
40*4882a593Smuzhiyun chan->msg_data[idx] = mssg;
41*4882a593Smuzhiyun chan->msg_count++;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun if (idx == MBOX_TX_QUEUE_LEN - 1)
44*4882a593Smuzhiyun chan->msg_free = 0;
45*4882a593Smuzhiyun else
46*4882a593Smuzhiyun chan->msg_free++;
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun spin_unlock_irqrestore(&chan->lock, flags);
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun return idx;
51*4882a593Smuzhiyun }
52*4882a593Smuzhiyun
msg_submit(struct mbox_chan * chan)53*4882a593Smuzhiyun static void msg_submit(struct mbox_chan *chan)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun unsigned count, idx;
56*4882a593Smuzhiyun unsigned long flags;
57*4882a593Smuzhiyun void *data;
58*4882a593Smuzhiyun int err = -EBUSY;
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun spin_lock_irqsave(&chan->lock, flags);
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun if (!chan->msg_count || chan->active_req)
63*4882a593Smuzhiyun goto exit;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun count = chan->msg_count;
66*4882a593Smuzhiyun idx = chan->msg_free;
67*4882a593Smuzhiyun if (idx >= count)
68*4882a593Smuzhiyun idx -= count;
69*4882a593Smuzhiyun else
70*4882a593Smuzhiyun idx += MBOX_TX_QUEUE_LEN - count;
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun data = chan->msg_data[idx];
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun if (chan->cl->tx_prepare)
75*4882a593Smuzhiyun chan->cl->tx_prepare(chan->cl, data);
76*4882a593Smuzhiyun /* Try to submit a message to the MBOX controller */
77*4882a593Smuzhiyun err = chan->mbox->ops->send_data(chan, data);
78*4882a593Smuzhiyun if (!err) {
79*4882a593Smuzhiyun chan->active_req = data;
80*4882a593Smuzhiyun chan->msg_count--;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun exit:
83*4882a593Smuzhiyun spin_unlock_irqrestore(&chan->lock, flags);
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun /* kick start the timer immediately to avoid delays */
86*4882a593Smuzhiyun if (!err && (chan->txdone_method & TXDONE_BY_POLL)) {
87*4882a593Smuzhiyun /* but only if not already active */
88*4882a593Smuzhiyun if (!hrtimer_active(&chan->mbox->poll_hrt))
89*4882a593Smuzhiyun hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
tx_tick(struct mbox_chan * chan,int r)93*4882a593Smuzhiyun static void tx_tick(struct mbox_chan *chan, int r)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun unsigned long flags;
96*4882a593Smuzhiyun void *mssg;
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun spin_lock_irqsave(&chan->lock, flags);
99*4882a593Smuzhiyun mssg = chan->active_req;
100*4882a593Smuzhiyun chan->active_req = NULL;
101*4882a593Smuzhiyun spin_unlock_irqrestore(&chan->lock, flags);
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /* Submit next message */
104*4882a593Smuzhiyun msg_submit(chan);
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun if (!mssg)
107*4882a593Smuzhiyun return;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun /* Notify the client */
110*4882a593Smuzhiyun if (chan->cl->tx_done)
111*4882a593Smuzhiyun chan->cl->tx_done(chan->cl, mssg, r);
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun if (r != -ETIME && chan->cl->tx_block)
114*4882a593Smuzhiyun complete(&chan->tx_complete);
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
txdone_hrtimer(struct hrtimer * hrtimer)117*4882a593Smuzhiyun static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun struct mbox_controller *mbox =
120*4882a593Smuzhiyun container_of(hrtimer, struct mbox_controller, poll_hrt);
121*4882a593Smuzhiyun bool txdone, resched = false;
122*4882a593Smuzhiyun int i;
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun for (i = 0; i < mbox->num_chans; i++) {
125*4882a593Smuzhiyun struct mbox_chan *chan = &mbox->chans[i];
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun if (chan->active_req && chan->cl) {
128*4882a593Smuzhiyun resched = true;
129*4882a593Smuzhiyun txdone = chan->mbox->ops->last_tx_done(chan);
130*4882a593Smuzhiyun if (txdone)
131*4882a593Smuzhiyun tx_tick(chan, 0);
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun if (resched) {
136*4882a593Smuzhiyun hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
137*4882a593Smuzhiyun return HRTIMER_RESTART;
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun return HRTIMER_NORESTART;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun /**
143*4882a593Smuzhiyun * mbox_chan_received_data - A way for controller driver to push data
144*4882a593Smuzhiyun * received from remote to the upper layer.
145*4882a593Smuzhiyun * @chan: Pointer to the mailbox channel on which RX happened.
146*4882a593Smuzhiyun * @mssg: Client specific message typecasted as void *
147*4882a593Smuzhiyun *
148*4882a593Smuzhiyun * After startup and before shutdown any data received on the chan
149*4882a593Smuzhiyun * is passed on to the API via atomic mbox_chan_received_data().
150*4882a593Smuzhiyun * The controller should ACK the RX only after this call returns.
151*4882a593Smuzhiyun */
mbox_chan_received_data(struct mbox_chan * chan,void * mssg)152*4882a593Smuzhiyun void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun /* No buffering the received data */
155*4882a593Smuzhiyun if (chan->cl->rx_callback)
156*4882a593Smuzhiyun chan->cl->rx_callback(chan->cl, mssg);
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_chan_received_data);
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun /**
161*4882a593Smuzhiyun * mbox_chan_txdone - A way for controller driver to notify the
162*4882a593Smuzhiyun * framework that the last TX has completed.
163*4882a593Smuzhiyun * @chan: Pointer to the mailbox chan on which TX happened.
164*4882a593Smuzhiyun * @r: Status of last TX - OK or ERROR
165*4882a593Smuzhiyun *
166*4882a593Smuzhiyun * The controller that has IRQ for TX ACK calls this atomic API
167*4882a593Smuzhiyun * to tick the TX state machine. It works only if txdone_irq
168*4882a593Smuzhiyun * is set by the controller.
169*4882a593Smuzhiyun */
mbox_chan_txdone(struct mbox_chan * chan,int r)170*4882a593Smuzhiyun void mbox_chan_txdone(struct mbox_chan *chan, int r)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
173*4882a593Smuzhiyun dev_err(chan->mbox->dev,
174*4882a593Smuzhiyun "Controller can't run the TX ticker\n");
175*4882a593Smuzhiyun return;
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun tx_tick(chan, r);
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_chan_txdone);
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun /**
183*4882a593Smuzhiyun * mbox_client_txdone - The way for a client to run the TX state machine.
184*4882a593Smuzhiyun * @chan: Mailbox channel assigned to this client.
185*4882a593Smuzhiyun * @r: Success status of last transmission.
186*4882a593Smuzhiyun *
187*4882a593Smuzhiyun * The client/protocol had received some 'ACK' packet and it notifies
188*4882a593Smuzhiyun * the API that the last packet was sent successfully. This only works
189*4882a593Smuzhiyun * if the controller can't sense TX-Done.
190*4882a593Smuzhiyun */
mbox_client_txdone(struct mbox_chan * chan,int r)191*4882a593Smuzhiyun void mbox_client_txdone(struct mbox_chan *chan, int r)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
194*4882a593Smuzhiyun dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
195*4882a593Smuzhiyun return;
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun tx_tick(chan, r);
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_client_txdone);
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun /**
203*4882a593Smuzhiyun * mbox_client_peek_data - A way for client driver to pull data
204*4882a593Smuzhiyun * received from remote by the controller.
205*4882a593Smuzhiyun * @chan: Mailbox channel assigned to this client.
206*4882a593Smuzhiyun *
207*4882a593Smuzhiyun * A poke to controller driver for any received data.
208*4882a593Smuzhiyun * The data is actually passed onto client via the
209*4882a593Smuzhiyun * mbox_chan_received_data()
210*4882a593Smuzhiyun * The call can be made from atomic context, so the controller's
211*4882a593Smuzhiyun * implementation of peek_data() must not sleep.
212*4882a593Smuzhiyun *
213*4882a593Smuzhiyun * Return: True, if controller has, and is going to push after this,
214*4882a593Smuzhiyun * some data.
215*4882a593Smuzhiyun * False, if controller doesn't have any data to be read.
216*4882a593Smuzhiyun */
mbox_client_peek_data(struct mbox_chan * chan)217*4882a593Smuzhiyun bool mbox_client_peek_data(struct mbox_chan *chan)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun if (chan->mbox->ops->peek_data)
220*4882a593Smuzhiyun return chan->mbox->ops->peek_data(chan);
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun return false;
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_client_peek_data);
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun /**
227*4882a593Smuzhiyun * mbox_send_message - For client to submit a message to be
228*4882a593Smuzhiyun * sent to the remote.
229*4882a593Smuzhiyun * @chan: Mailbox channel assigned to this client.
230*4882a593Smuzhiyun * @mssg: Client specific message typecasted.
231*4882a593Smuzhiyun *
232*4882a593Smuzhiyun * For client to submit data to the controller destined for a remote
233*4882a593Smuzhiyun * processor. If the client had set 'tx_block', the call will return
234*4882a593Smuzhiyun * either when the remote receives the data or when 'tx_tout' millisecs
235*4882a593Smuzhiyun * run out.
236*4882a593Smuzhiyun * In non-blocking mode, the requests are buffered by the API and a
237*4882a593Smuzhiyun * non-negative token is returned for each queued request. If the request
238*4882a593Smuzhiyun * is not queued, a negative token is returned. Upon failure or successful
239*4882a593Smuzhiyun * TX, the API calls 'tx_done' from atomic context, from which the client
240*4882a593Smuzhiyun * could submit yet another request.
241*4882a593Smuzhiyun * The pointer to message should be preserved until it is sent
242*4882a593Smuzhiyun * over the chan, i.e, tx_done() is made.
243*4882a593Smuzhiyun * This function could be called from atomic context as it simply
244*4882a593Smuzhiyun * queues the data and returns a token against the request.
245*4882a593Smuzhiyun *
246*4882a593Smuzhiyun * Return: Non-negative integer for successful submission (non-blocking mode)
247*4882a593Smuzhiyun * or transmission over chan (blocking mode).
248*4882a593Smuzhiyun * Negative value denotes failure.
249*4882a593Smuzhiyun */
mbox_send_message(struct mbox_chan * chan,void * mssg)250*4882a593Smuzhiyun int mbox_send_message(struct mbox_chan *chan, void *mssg)
251*4882a593Smuzhiyun {
252*4882a593Smuzhiyun int t;
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun if (!chan || !chan->cl)
255*4882a593Smuzhiyun return -EINVAL;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun t = add_to_rbuf(chan, mssg);
258*4882a593Smuzhiyun if (t < 0) {
259*4882a593Smuzhiyun dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
260*4882a593Smuzhiyun return t;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun msg_submit(chan);
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun if (chan->cl->tx_block) {
266*4882a593Smuzhiyun unsigned long wait;
267*4882a593Smuzhiyun int ret;
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun if (!chan->cl->tx_tout) /* wait forever */
270*4882a593Smuzhiyun wait = msecs_to_jiffies(3600000);
271*4882a593Smuzhiyun else
272*4882a593Smuzhiyun wait = msecs_to_jiffies(chan->cl->tx_tout);
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun ret = wait_for_completion_timeout(&chan->tx_complete, wait);
275*4882a593Smuzhiyun if (ret == 0) {
276*4882a593Smuzhiyun t = -ETIME;
277*4882a593Smuzhiyun tx_tick(chan, t);
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun
281*4882a593Smuzhiyun return t;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_send_message);
284*4882a593Smuzhiyun
285*4882a593Smuzhiyun /**
286*4882a593Smuzhiyun * mbox_flush - flush a mailbox channel
287*4882a593Smuzhiyun * @chan: mailbox channel to flush
288*4882a593Smuzhiyun * @timeout: time, in milliseconds, to allow the flush operation to succeed
289*4882a593Smuzhiyun *
290*4882a593Smuzhiyun * Mailbox controllers that need to work in atomic context can implement the
291*4882a593Smuzhiyun * ->flush() callback to busy loop until a transmission has been completed.
292*4882a593Smuzhiyun * The implementation must call mbox_chan_txdone() upon success. Clients can
293*4882a593Smuzhiyun * call the mbox_flush() function at any time after mbox_send_message() to
294*4882a593Smuzhiyun * flush the transmission. After the function returns success, the mailbox
295*4882a593Smuzhiyun * transmission is guaranteed to have completed.
296*4882a593Smuzhiyun *
297*4882a593Smuzhiyun * Returns: 0 on success or a negative error code on failure.
298*4882a593Smuzhiyun */
mbox_flush(struct mbox_chan * chan,unsigned long timeout)299*4882a593Smuzhiyun int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun int ret;
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun if (!chan->mbox->ops->flush)
304*4882a593Smuzhiyun return -ENOTSUPP;
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun ret = chan->mbox->ops->flush(chan, timeout);
307*4882a593Smuzhiyun if (ret < 0)
308*4882a593Smuzhiyun tx_tick(chan, ret);
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun return ret;
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_flush);
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun /**
315*4882a593Smuzhiyun * mbox_request_channel - Request a mailbox channel.
316*4882a593Smuzhiyun * @cl: Identity of the client requesting the channel.
317*4882a593Smuzhiyun * @index: Index of mailbox specifier in 'mboxes' property.
318*4882a593Smuzhiyun *
319*4882a593Smuzhiyun * The Client specifies its requirements and capabilities while asking for
320*4882a593Smuzhiyun * a mailbox channel. It can't be called from atomic context.
321*4882a593Smuzhiyun * The channel is exclusively allocated and can't be used by another
322*4882a593Smuzhiyun * client before the owner calls mbox_free_channel.
323*4882a593Smuzhiyun * After assignment, any packet received on this channel will be
324*4882a593Smuzhiyun * handed over to the client via the 'rx_callback'.
325*4882a593Smuzhiyun * The framework holds reference to the client, so the mbox_client
326*4882a593Smuzhiyun * structure shouldn't be modified until the mbox_free_channel returns.
327*4882a593Smuzhiyun *
328*4882a593Smuzhiyun * Return: Pointer to the channel assigned to the client if successful.
329*4882a593Smuzhiyun * ERR_PTR for request failure.
330*4882a593Smuzhiyun */
mbox_request_channel(struct mbox_client * cl,int index)331*4882a593Smuzhiyun struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun struct device *dev = cl->dev;
334*4882a593Smuzhiyun struct mbox_controller *mbox;
335*4882a593Smuzhiyun struct of_phandle_args spec;
336*4882a593Smuzhiyun struct mbox_chan *chan;
337*4882a593Smuzhiyun unsigned long flags;
338*4882a593Smuzhiyun int ret;
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun if (!dev || !dev->of_node) {
341*4882a593Smuzhiyun pr_debug("%s: No owner device node\n", __func__);
342*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun mutex_lock(&con_mutex);
346*4882a593Smuzhiyun
347*4882a593Smuzhiyun if (of_parse_phandle_with_args(dev->of_node, "mboxes",
348*4882a593Smuzhiyun "#mbox-cells", index, &spec)) {
349*4882a593Smuzhiyun dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
350*4882a593Smuzhiyun mutex_unlock(&con_mutex);
351*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun chan = ERR_PTR(-EPROBE_DEFER);
355*4882a593Smuzhiyun list_for_each_entry(mbox, &mbox_cons, node)
356*4882a593Smuzhiyun if (mbox->dev->of_node == spec.np) {
357*4882a593Smuzhiyun chan = mbox->of_xlate(mbox, &spec);
358*4882a593Smuzhiyun if (!IS_ERR(chan))
359*4882a593Smuzhiyun break;
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun of_node_put(spec.np);
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun if (IS_ERR(chan)) {
365*4882a593Smuzhiyun mutex_unlock(&con_mutex);
366*4882a593Smuzhiyun return chan;
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
370*4882a593Smuzhiyun dev_dbg(dev, "%s: mailbox not free\n", __func__);
371*4882a593Smuzhiyun mutex_unlock(&con_mutex);
372*4882a593Smuzhiyun return ERR_PTR(-EBUSY);
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun spin_lock_irqsave(&chan->lock, flags);
376*4882a593Smuzhiyun chan->msg_free = 0;
377*4882a593Smuzhiyun chan->msg_count = 0;
378*4882a593Smuzhiyun chan->active_req = NULL;
379*4882a593Smuzhiyun chan->cl = cl;
380*4882a593Smuzhiyun init_completion(&chan->tx_complete);
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
383*4882a593Smuzhiyun chan->txdone_method = TXDONE_BY_ACK;
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun spin_unlock_irqrestore(&chan->lock, flags);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun if (chan->mbox->ops->startup) {
388*4882a593Smuzhiyun ret = chan->mbox->ops->startup(chan);
389*4882a593Smuzhiyun
390*4882a593Smuzhiyun if (ret) {
391*4882a593Smuzhiyun dev_err(dev, "Unable to startup the chan (%d)\n", ret);
392*4882a593Smuzhiyun mbox_free_channel(chan);
393*4882a593Smuzhiyun chan = ERR_PTR(ret);
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun mutex_unlock(&con_mutex);
398*4882a593Smuzhiyun return chan;
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_request_channel);
401*4882a593Smuzhiyun
mbox_request_channel_byname(struct mbox_client * cl,const char * name)402*4882a593Smuzhiyun struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
403*4882a593Smuzhiyun const char *name)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun struct device_node *np = cl->dev->of_node;
406*4882a593Smuzhiyun struct property *prop;
407*4882a593Smuzhiyun const char *mbox_name;
408*4882a593Smuzhiyun int index = 0;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun if (!np) {
411*4882a593Smuzhiyun dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
412*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun if (!of_get_property(np, "mbox-names", NULL)) {
416*4882a593Smuzhiyun dev_err(cl->dev,
417*4882a593Smuzhiyun "%s() requires an \"mbox-names\" property\n", __func__);
418*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun
421*4882a593Smuzhiyun of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
422*4882a593Smuzhiyun if (!strncmp(name, mbox_name, strlen(name)))
423*4882a593Smuzhiyun return mbox_request_channel(cl, index);
424*4882a593Smuzhiyun index++;
425*4882a593Smuzhiyun }
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
428*4882a593Smuzhiyun __func__, name);
429*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun /**
434*4882a593Smuzhiyun * mbox_free_channel - The client relinquishes control of a mailbox
435*4882a593Smuzhiyun * channel by this call.
436*4882a593Smuzhiyun * @chan: The mailbox channel to be freed.
437*4882a593Smuzhiyun */
mbox_free_channel(struct mbox_chan * chan)438*4882a593Smuzhiyun void mbox_free_channel(struct mbox_chan *chan)
439*4882a593Smuzhiyun {
440*4882a593Smuzhiyun unsigned long flags;
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun if (!chan || !chan->cl)
443*4882a593Smuzhiyun return;
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun if (chan->mbox->ops->shutdown)
446*4882a593Smuzhiyun chan->mbox->ops->shutdown(chan);
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun /* The queued TX requests are simply aborted, no callbacks are made */
449*4882a593Smuzhiyun spin_lock_irqsave(&chan->lock, flags);
450*4882a593Smuzhiyun chan->cl = NULL;
451*4882a593Smuzhiyun chan->active_req = NULL;
452*4882a593Smuzhiyun if (chan->txdone_method == TXDONE_BY_ACK)
453*4882a593Smuzhiyun chan->txdone_method = TXDONE_BY_POLL;
454*4882a593Smuzhiyun
455*4882a593Smuzhiyun module_put(chan->mbox->dev->driver->owner);
456*4882a593Smuzhiyun spin_unlock_irqrestore(&chan->lock, flags);
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_free_channel);
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller * mbox,const struct of_phandle_args * sp)461*4882a593Smuzhiyun of_mbox_index_xlate(struct mbox_controller *mbox,
462*4882a593Smuzhiyun const struct of_phandle_args *sp)
463*4882a593Smuzhiyun {
464*4882a593Smuzhiyun int ind = sp->args[0];
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun if (ind >= mbox->num_chans)
467*4882a593Smuzhiyun return ERR_PTR(-EINVAL);
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun return &mbox->chans[ind];
470*4882a593Smuzhiyun }
471*4882a593Smuzhiyun
472*4882a593Smuzhiyun /**
473*4882a593Smuzhiyun * mbox_controller_register - Register the mailbox controller
474*4882a593Smuzhiyun * @mbox: Pointer to the mailbox controller.
475*4882a593Smuzhiyun *
476*4882a593Smuzhiyun * The controller driver registers its communication channels
477*4882a593Smuzhiyun */
mbox_controller_register(struct mbox_controller * mbox)478*4882a593Smuzhiyun int mbox_controller_register(struct mbox_controller *mbox)
479*4882a593Smuzhiyun {
480*4882a593Smuzhiyun int i, txdone;
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun /* Sanity check */
483*4882a593Smuzhiyun if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
484*4882a593Smuzhiyun return -EINVAL;
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun if (mbox->txdone_irq)
487*4882a593Smuzhiyun txdone = TXDONE_BY_IRQ;
488*4882a593Smuzhiyun else if (mbox->txdone_poll)
489*4882a593Smuzhiyun txdone = TXDONE_BY_POLL;
490*4882a593Smuzhiyun else /* It has to be ACK then */
491*4882a593Smuzhiyun txdone = TXDONE_BY_ACK;
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun if (txdone == TXDONE_BY_POLL) {
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun if (!mbox->ops->last_tx_done) {
496*4882a593Smuzhiyun dev_err(mbox->dev, "last_tx_done method is absent\n");
497*4882a593Smuzhiyun return -EINVAL;
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
501*4882a593Smuzhiyun HRTIMER_MODE_REL);
502*4882a593Smuzhiyun mbox->poll_hrt.function = txdone_hrtimer;
503*4882a593Smuzhiyun }
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun for (i = 0; i < mbox->num_chans; i++) {
506*4882a593Smuzhiyun struct mbox_chan *chan = &mbox->chans[i];
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun chan->cl = NULL;
509*4882a593Smuzhiyun chan->mbox = mbox;
510*4882a593Smuzhiyun chan->txdone_method = txdone;
511*4882a593Smuzhiyun spin_lock_init(&chan->lock);
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun
514*4882a593Smuzhiyun if (!mbox->of_xlate)
515*4882a593Smuzhiyun mbox->of_xlate = of_mbox_index_xlate;
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun mutex_lock(&con_mutex);
518*4882a593Smuzhiyun list_add_tail(&mbox->node, &mbox_cons);
519*4882a593Smuzhiyun mutex_unlock(&con_mutex);
520*4882a593Smuzhiyun
521*4882a593Smuzhiyun return 0;
522*4882a593Smuzhiyun }
523*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_controller_register);
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun /**
526*4882a593Smuzhiyun * mbox_controller_unregister - Unregister the mailbox controller
527*4882a593Smuzhiyun * @mbox: Pointer to the mailbox controller.
528*4882a593Smuzhiyun */
mbox_controller_unregister(struct mbox_controller * mbox)529*4882a593Smuzhiyun void mbox_controller_unregister(struct mbox_controller *mbox)
530*4882a593Smuzhiyun {
531*4882a593Smuzhiyun int i;
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun if (!mbox)
534*4882a593Smuzhiyun return;
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun mutex_lock(&con_mutex);
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun list_del(&mbox->node);
539*4882a593Smuzhiyun
540*4882a593Smuzhiyun for (i = 0; i < mbox->num_chans; i++)
541*4882a593Smuzhiyun mbox_free_channel(&mbox->chans[i]);
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun if (mbox->txdone_poll)
544*4882a593Smuzhiyun hrtimer_cancel(&mbox->poll_hrt);
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun mutex_unlock(&con_mutex);
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mbox_controller_unregister);
549*4882a593Smuzhiyun
/* devres release callback: @res holds a struct mbox_controller * */
static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	mbox_controller_unregister(*(struct mbox_controller **)res);
}
556*4882a593Smuzhiyun
/* devres match callback: true when @res wraps the controller @data */
static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **slot = res;

	/* An empty or missing slot can never match */
	if (WARN_ON(!slot || !*slot))
		return 0;

	return *slot == data;
}
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun /**
568*4882a593Smuzhiyun * devm_mbox_controller_register() - managed mbox_controller_register()
569*4882a593Smuzhiyun * @dev: device owning the mailbox controller being registered
570*4882a593Smuzhiyun * @mbox: mailbox controller being registered
571*4882a593Smuzhiyun *
572*4882a593Smuzhiyun * This function adds a device-managed resource that will make sure that the
573*4882a593Smuzhiyun * mailbox controller, which is registered using mbox_controller_register()
574*4882a593Smuzhiyun * as part of this function, will be unregistered along with the rest of
575*4882a593Smuzhiyun * device-managed resources upon driver probe failure or driver removal.
576*4882a593Smuzhiyun *
577*4882a593Smuzhiyun * Returns 0 on success or a negative error code on failure.
578*4882a593Smuzhiyun */
devm_mbox_controller_register(struct device * dev,struct mbox_controller * mbox)579*4882a593Smuzhiyun int devm_mbox_controller_register(struct device *dev,
580*4882a593Smuzhiyun struct mbox_controller *mbox)
581*4882a593Smuzhiyun {
582*4882a593Smuzhiyun struct mbox_controller **ptr;
583*4882a593Smuzhiyun int err;
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
586*4882a593Smuzhiyun GFP_KERNEL);
587*4882a593Smuzhiyun if (!ptr)
588*4882a593Smuzhiyun return -ENOMEM;
589*4882a593Smuzhiyun
590*4882a593Smuzhiyun err = mbox_controller_register(mbox);
591*4882a593Smuzhiyun if (err < 0) {
592*4882a593Smuzhiyun devres_free(ptr);
593*4882a593Smuzhiyun return err;
594*4882a593Smuzhiyun }
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun devres_add(dev, ptr);
597*4882a593Smuzhiyun *ptr = mbox;
598*4882a593Smuzhiyun
599*4882a593Smuzhiyun return 0;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_mbox_controller_register);
602*4882a593Smuzhiyun
603*4882a593Smuzhiyun /**
604*4882a593Smuzhiyun * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
605*4882a593Smuzhiyun * @dev: device owning the mailbox controller being unregistered
606*4882a593Smuzhiyun * @mbox: mailbox controller being unregistered
607*4882a593Smuzhiyun *
608*4882a593Smuzhiyun * This function unregisters the mailbox controller and removes the device-
609*4882a593Smuzhiyun * managed resource that was set up to automatically unregister the mailbox
610*4882a593Smuzhiyun * controller on driver probe failure or driver removal. It's typically not
611*4882a593Smuzhiyun * necessary to call this function.
612*4882a593Smuzhiyun */
devm_mbox_controller_unregister(struct device * dev,struct mbox_controller * mbox)613*4882a593Smuzhiyun void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
614*4882a593Smuzhiyun {
615*4882a593Smuzhiyun WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
616*4882a593Smuzhiyun devm_mbox_controller_match, mbox));
617*4882a593Smuzhiyun }
618*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);
619