// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
 */

#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>

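/*
 * Register bit layout note: within each status/control nibble the MU
 * numbers its flags MSB-first, so index 0 maps to the highest bit of the
 * group (hence the "3 - (x)" in the macros below).
 */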
#define IMX_MU_xSR_GIPn(x)	BIT(28 + (3 - (x)))
#define IMX_MU_xSR_RFn(x)	BIT(24 + (3 - (x)))
#define IMX_MU_xSR_TEn(x)	BIT(20 + (3 - (x)))
#define IMX_MU_xSR_BRDIP	BIT(9)

/* General Purpose Interrupt Enable */
#define IMX_MU_xCR_GIEn(x)	BIT(28 + (3 - (x)))
/* Receive Interrupt Enable */
#define IMX_MU_xCR_RIEn(x)	BIT(24 + (3 - (x)))
/* Transmit Interrupt Enable */
#define IMX_MU_xCR_TIEn(x)	BIT(20 + (3 - (x)))
/* General Purpose Interrupt Request */
#define IMX_MU_xCR_GIRn(x)	BIT(16 + (3 - (x)))

#define IMX_MU_CHANS		16
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS	6
#define IMX_MU_CHAN_NAME_SIZE	20

enum imx_mu_chan_type {
	IMX_MU_TYPE_TX,		/* Tx */
	IMX_MU_TYPE_RX,		/* Rx */
	IMX_MU_TYPE_TXDB,	/* Tx doorbell */
	IMX_MU_TYPE_RXDB,	/* Rx doorbell */
};

struct imx_sc_rpc_msg_max {
	struct imx_sc_rpc_msg hdr;
	u32 data[7];
};

struct imx_mu_con_priv {
	unsigned int		idx;
	char			irq_desc[IMX_MU_CHAN_NAME_SIZE];
	enum imx_mu_chan_type	type;
	struct mbox_chan	*chan;
	struct tasklet_struct	txdb_tasklet;
};

struct imx_mu_priv {
	struct device		*dev;
	void __iomem		*base;
	spinlock_t		xcr_lock; /* control register lock */

	struct mbox_controller	mbox;
	struct mbox_chan	mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv	con_priv[IMX_MU_CHANS];
	const struct imx_mu_dcfg	*dcfg;
	struct clk		*clk;
	int			irq;
	bool			suspend;

	u32			xcr;

	bool			side_b;
};

struct imx_mu_dcfg {
	int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
	int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
	void (*init)(struct imx_mu_priv *priv);
	u32	xTR[4];		/* Transmit Registers */
	u32	xRR[4];		/* Receive Registers */
	u32	xSR;		/* Status Register */
	u32	xCR;		/* Control Register */
};

static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
{
	return container_of(mbox, struct imx_mu_priv, mbox);
}

static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
{
	iowrite32(val, priv->base + offs);
}

static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
{
	return ioread32(priv->base + offs);
}

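/*
 * Atomically read-modify-write the control register: bits in @clr are
 * cleared, bits in @set are set, under xcr_lock so concurrent channel
 * startup/shutdown and the ISR cannot lose each other's updates.
 */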
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->xcr_lock, flags);
	val = imx_mu_read(priv, priv->dcfg->xCR);
	val &= ~clr;
	val |= set;
	imx_mu_write(priv, val, priv->dcfg->xCR);
	spin_unlock_irqrestore(&priv->xcr_lock, flags);

	return val;
}

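/*
 * Generic (non-SCU) transmit path: a data channel writes one 32-bit word
 * to its transmit register and enables TIE so the transmit-empty interrupt
 * reports tx-done; a Tx doorbell only raises a general purpose interrupt
 * request (GIR) toward the other side and completes immediately through
 * the tasklet, since doorbells carry no payload and have no ACK.
 */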
static int imx_mu_generic_tx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp,
			     void *data)
{
	u32 *arg = data;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_write(priv, *arg, priv->dcfg->xTR[cp->idx]);
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
		break;
	case IMX_MU_TYPE_TXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
		tasklet_schedule(&cp->txdb_tasklet);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}

static int imx_mu_generic_rx(struct imx_mu_priv *priv,
			     struct imx_mu_con_priv *cp)
{
	u32 dat;

	dat = imx_mu_read(priv, priv->dcfg->xRR[cp->idx]);
	mbox_chan_received_data(cp->chan, (void *)&dat);

	return 0;
}

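/*
 * SCU firmware transmit path: an SCU RPC message spans msg->hdr.size
 * 32-bit words.  The first four words fit the four transmit registers
 * directly; any further words reuse the registers modulo 4, polling the
 * TE (transmit empty) status bit before each write so the other side has
 * drained the register.  TIE is enabled last so the final transmit-empty
 * interrupt signals tx-done.
 */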
static int imx_mu_scu_tx(struct imx_mu_priv *priv,
			 struct imx_mu_con_priv *cp,
			 void *data)
{
	struct imx_sc_rpc_msg_max *msg = data;
	u32 *arg = data;
	int i, ret;
	u32 xsr;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		/*
		 * msg->hdr.size specifies the number of u32 words while
		 * sizeof yields bytes.
		 */

		if (msg->hdr.size > sizeof(*msg) / 4) {
			/*
			 * The real message size can be different from the
			 * struct imx_sc_rpc_msg_max size.
			 */
			dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
			return -EINVAL;
		}

		for (i = 0; i < 4 && i < msg->hdr.size; i++)
			imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
		for (; i < msg->hdr.size; i++) {
			ret = readl_poll_timeout(priv->base + priv->dcfg->xSR,
						 xsr,
						 xsr & IMX_MU_xSR_TEn(i % 4),
						 0, 100);
			if (ret) {
				dev_err(priv->dev, "Send data index: %d timeout\n", i);
				return ret;
			}
			imx_mu_write(priv, *arg++, priv->dcfg->xTR[i % 4]);
		}

		imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}

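/*
 * SCU firmware receive path: mask the RX interrupt, read the first word to
 * get the RPC header (and thus the message length), then poll the RF
 * (receive full) status bit for every remaining word, again cycling
 * through the four receive registers.  RIE is re-enabled before the
 * assembled message is handed to the mailbox client.
 */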
static int imx_mu_scu_rx(struct imx_mu_priv *priv,
			 struct imx_mu_con_priv *cp)
{
	struct imx_sc_rpc_msg_max msg;
	u32 *data = (u32 *)&msg;
	int i, ret;
	u32 xsr;

	imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(0));
	*data++ = imx_mu_read(priv, priv->dcfg->xRR[0]);

	if (msg.hdr.size > sizeof(msg) / 4) {
		dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
		return -EINVAL;
	}

	for (i = 1; i < msg.hdr.size; i++) {
		ret = readl_poll_timeout(priv->base + priv->dcfg->xSR, xsr,
					 xsr & IMX_MU_xSR_RFn(i % 4), 0, 100);
		if (ret) {
			dev_err(priv->dev, "timeout read idx %d\n", i);
			return ret;
		}
		*data++ = imx_mu_read(priv, priv->dcfg->xRR[i % 4]);
	}

	imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(0), 0);
	mbox_chan_received_data(cp->chan, (void *)&msg);

	return 0;
}

static void imx_mu_txdb_tasklet(unsigned long data)
{
	struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;

	mbox_chan_txdone(cp->chan, 0);
}

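/*
 * Shared interrupt handler, registered once per active (non-doorbell)
 * channel.  Each invocation masks the status word with the enable bits
 * that belong to this channel's type (TE/TIE for Tx, RF/RIE for Rx,
 * GIP/GIE for Rx doorbell) and either completes the transmission, pulls
 * the message in via the dcfg->rx hook, or reports a doorbell event to
 * the client.
 */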
static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl;

	ctrl = imx_mu_read(priv, priv->dcfg->xCR);
	val = imx_mu_read(priv, priv->dcfg->xSR);

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		val &= IMX_MU_xSR_TEn(cp->idx) &
		       (ctrl & IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		val &= IMX_MU_xSR_RFn(cp->idx) &
		       (ctrl & IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		val &= IMX_MU_xSR_GIPn(cp->idx) &
		       (ctrl & IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	if (!val)
		return IRQ_NONE;

	if (val == IMX_MU_xSR_TEn(cp->idx)) {
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
		priv->dcfg->rx(priv, cp);
	} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
		imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), priv->dcfg->xSR);
		mbox_chan_received_data(chan, NULL);
	} else {
		dev_warn_ratelimited(priv->dev, "Unhandled interrupt\n");
		return IRQ_NONE;
	}

	if (priv->suspend)
		pm_system_wakeup();

	return IRQ_HANDLED;
}

static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;

	return priv->dcfg->tx(priv, cp, data);
}

static int imx_mu_startup(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	unsigned long irq_flag = IRQF_SHARED;
	int ret;

	pm_runtime_get_sync(priv->dev);
	if (cp->type == IMX_MU_TYPE_TXDB) {
		/* Tx doorbell channels have no ACK support */
		tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
			     (unsigned long)cp);
		return 0;
	}

	/* The IPC MU IRQ should be requested with IRQF_NO_SUSPEND set */
	if (!priv->dev->pm_domain)
		irq_flag |= IRQF_NO_SUSPEND;

	ret = request_irq(priv->irq, imx_mu_isr, irq_flag,
			  cp->irq_desc, chan);
	if (ret) {
		dev_err(priv->dev,
			"Unable to acquire IRQ %d\n", priv->irq);
		return ret;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
		break;
	default:
		break;
	}

	priv->suspend = true;

	return 0;
}

static void imx_mu_shutdown(struct mbox_chan *chan)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;

	if (cp->type == IMX_MU_TYPE_TXDB) {
		tasklet_kill(&cp->txdb_tasklet);
		pm_runtime_put_sync(priv->dev);
		return;
	}

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	free_irq(priv->irq, chan);
	pm_runtime_put_sync(priv->dev);
}

static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};

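/*
 * SCU flavour of the two-cell mbox specifier (<type index>): only TX0,
 * RX0 and the four Rx doorbells exist, so TX/RX map to channels 0/1 and
 * RXDB n maps to channel 2 + n.
 */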
static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
					  const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */

	switch (type) {
	case IMX_MU_TYPE_TX:
	case IMX_MU_TYPE_RX:
		if (idx != 0)
			dev_err(mbox->dev, "Invalid chan idx: %d\n", idx);
		chan = type;
		break;
	case IMX_MU_TYPE_RXDB:
		chan = 2 + idx;
		break;
	default:
		dev_err(mbox->dev, "Invalid chan type: %d\n", type);
		return ERR_PTR(-EINVAL);
	}

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

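/*
 * Generic flavour: four channels per type, so the two-cell specifier
 * <type index> resolves to channel "type * 4 + index".  For example
 * (illustrative consumer snippet, node name is a placeholder):
 *
 *	mboxes = <&mu 0 1>;	-> Tx data channel 1  (channel 1)
 *	mboxes = <&mu 3 2>;	-> Rx doorbell 2      (channel 14)
 */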
static struct mbox_chan *imx_mu_xlate(struct mbox_controller *mbox,
				      const struct of_phandle_args *sp)
{
	u32 type, idx, chan;

	if (sp->args_count != 2) {
		dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
		return ERR_PTR(-EINVAL);
	}

	type = sp->args[0]; /* channel type */
	idx = sp->args[1]; /* index */
	chan = type * 4 + idx;

	if (chan >= mbox->num_chans) {
		dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
		return ERR_PTR(-EINVAL);
	}

	return &mbox->chans[chan];
}

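/*
 * Generic init: populate all 16 channels (4 types x 4 indexes).  The
 * control register is only reset to its default when this instance is
 * not the "fsl,mu-side-b" half of the unit.
 */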
static void imx_mu_init_generic(struct imx_mu_priv *priv)
{
	unsigned int i;

	for (i = 0; i < IMX_MU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i % 4;
		cp->type = i >> 2;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_CHANS;
	priv->mbox.of_xlate = imx_mu_xlate;

	if (priv->side_b)
		return;

	/* Set default MU configuration */
	imx_mu_write(priv, 0, priv->dcfg->xCR);
}

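/*
 * SCU init: only six channels are exposed, in the order TX0, RX0 and
 * RXDB0-RXDB3, matching the layout imx_mu_scu_xlate() expects.
 */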
static void imx_mu_init_scu(struct imx_mu_priv *priv)
{
	unsigned int i;

	for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
		struct imx_mu_con_priv *cp = &priv->con_priv[i];

		cp->idx = i < 2 ? 0 : i - 2;
		cp->type = i < 2 ? i : IMX_MU_TYPE_RXDB;
		cp->chan = &priv->mbox_chans[i];
		priv->mbox_chans[i].con_priv = cp;
		snprintf(cp->irq_desc, sizeof(cp->irq_desc),
			 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
	}

	priv->mbox.num_chans = IMX_MU_SCU_CHANS;
	priv->mbox.of_xlate = imx_mu_scu_xlate;

	/* Set default MU configuration */
	imx_mu_write(priv, 0, priv->dcfg->xCR);
}

static int imx_mu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct imx_mu_priv *priv;
	const struct imx_mu_dcfg *dcfg;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	dcfg = of_device_get_match_data(dev);
	if (!dcfg)
		return -EINVAL;
	priv->dcfg = dcfg;

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		if (PTR_ERR(priv->clk) != -ENOENT)
			return PTR_ERR(priv->clk);

		priv->clk = NULL;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "Failed to enable clock\n");
		return ret;
	}

	priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");

	priv->dcfg->init(priv);

	spin_lock_init(&priv->xcr_lock);

	priv->mbox.dev = dev;
	priv->mbox.ops = &imx_mu_ops;
	priv->mbox.chans = priv->mbox_chans;
	priv->mbox.txdone_irq = true;

	platform_set_drvdata(pdev, priv);

	ret = devm_mbox_controller_register(dev, &priv->mbox);
	if (ret) {
		clk_disable_unprepare(priv->clk);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		goto disable_runtime_pm;
	}

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		goto disable_runtime_pm;

	clk_disable_unprepare(priv->clk);

	priv->suspend = false;

	return 0;

disable_runtime_pm:
	pm_runtime_disable(dev);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int imx_mu_remove(struct platform_device *pdev)
{
	struct imx_mu_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_disable(priv->dev);

	return 0;
}

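/*
 * Per-SoC configuration: the i.MX6SX and i.MX7ULP variants differ only in
 * register offsets, while the i.MX8 SCU variant additionally swaps in the
 * multi-word RPC tx/rx handlers and the reduced SCU channel layout.
 */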
static const struct imx_mu_dcfg imx_mu_cfg_imx6sx = {
	.tx = imx_mu_generic_tx,
	.rx = imx_mu_generic_rx,
	.init = imx_mu_init_generic,
	.xTR = {0x0, 0x4, 0x8, 0xc},
	.xRR = {0x10, 0x14, 0x18, 0x1c},
	.xSR = 0x20,
	.xCR = 0x24,
};

static const struct imx_mu_dcfg imx_mu_cfg_imx7ulp = {
	.tx = imx_mu_generic_tx,
	.rx = imx_mu_generic_rx,
	.init = imx_mu_init_generic,
	.xTR = {0x20, 0x24, 0x28, 0x2c},
	.xRR = {0x40, 0x44, 0x48, 0x4c},
	.xSR = 0x60,
	.xCR = 0x64,
};

static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
	.tx = imx_mu_scu_tx,
	.rx = imx_mu_scu_rx,
	.init = imx_mu_init_scu,
	.xTR = {0x0, 0x4, 0x8, 0xc},
	.xRR = {0x10, 0x14, 0x18, 0x1c},
	.xSR = 0x20,
	.xCR = 0x24,
};

static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
	{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
	{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);

static int __maybe_unused imx_mu_suspend_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	if (!priv->clk)
		priv->xcr = imx_mu_read(priv, priv->dcfg->xCR);

	return 0;
}

static int __maybe_unused imx_mu_resume_noirq(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	/*
	 * ONLY restore the MU control register when its context was lost
	 * (it reads back as zero).  TIE may legitimately get set during
	 * noirq resume while MU data communication is already going on;
	 * blindly restoring the saved value would overwrite that TIE,
	 * make the pending send fail and may freeze the system.  This
	 * issue was observed when testing freeze-mode suspend.
	 */
	if (!imx_mu_read(priv, priv->dcfg->xCR) && !priv->clk)
		imx_mu_write(priv, priv->xcr, priv->dcfg->xCR);

	return 0;
}

static int __maybe_unused imx_mu_runtime_suspend(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused imx_mu_runtime_resume(struct device *dev)
{
	struct imx_mu_priv *priv = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		dev_err(dev, "failed to enable clock\n");

	return ret;
}

static const struct dev_pm_ops imx_mu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_mu_suspend_noirq,
				      imx_mu_resume_noirq)
	SET_RUNTIME_PM_OPS(imx_mu_runtime_suspend,
			   imx_mu_runtime_resume, NULL)
};

static struct platform_driver imx_mu_driver = {
	.probe = imx_mu_probe,
	.remove = imx_mu_remove,
	.driver = {
		.name = "imx_mu",
		.of_match_table = imx_mu_dt_ids,
		.pm = &imx_mu_pm_ops,
	},
};
module_platform_driver(imx_mu_driver);

MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");