// SPDX-License-Identifier: GPL-2.0+
/*
 * rWTM BIU Mailbox driver for Armada 37xx
 *
 * Author: Marek Behun <marek.behun@nic.cz>
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/armada-37xx-rwtm-mailbox.h>

#define DRIVER_NAME		"armada-37xx-rwtm-mailbox"

/* relative to rWTM BIU Mailbox Registers */
#define RWTM_MBOX_PARAM(i)		(0x0 + ((i) << 2))
#define RWTM_MBOX_COMMAND		0x40
#define RWTM_MBOX_RETURN_STATUS		0x80
#define RWTM_MBOX_STATUS(i)		(0x84 + ((i) << 2))
#define RWTM_MBOX_FIFO_STATUS		0xc4
#define FIFO_STS_RDY			0x100
#define FIFO_STS_CNTR_MASK		0x7
#define FIFO_STS_CNTR_MAX		4

#define RWTM_HOST_INT_RESET		0xc8
#define RWTM_HOST_INT_MASK		0xcc
#define SP_CMD_COMPLETE			BIT(0)
#define SP_CMD_QUEUE_FULL_ACCESS	BIT(17)
#define SP_CMD_QUEUE_FULL		BIT(18)

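/*
 * struct a37xx_mbox - driver state for the single rWTM BIU mailbox
 * @dev:	owning platform device
 * @controller:	mailbox framework controller exposing one channel
 * @base:	ioremapped rWTM BIU mailbox register block
 * @irq:	mailbox interrupt, requested on channel startup
 */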
struct a37xx_mbox {
	struct device *dev;
	struct mbox_controller controller;
	void __iomem *base;
	int irq;
};

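/*
 * Read the return value and the 16 status words of a completed command and
 * hand them to the mailbox core as an armada_37xx_rwtm_rx_msg.
 */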
static void a37xx_mbox_receive(struct mbox_chan *chan)
{
	struct a37xx_mbox *mbox = chan->con_priv;
	struct armada_37xx_rwtm_rx_msg rx_msg;
	int i;

	rx_msg.retval = readl(mbox->base + RWTM_MBOX_RETURN_STATUS);
	for (i = 0; i < 16; ++i)
		rx_msg.status[i] = readl(mbox->base + RWTM_MBOX_STATUS(i));

	mbox_chan_received_data(chan, &rx_msg);
}

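/*
 * Interrupt handler: read the pending causes from RWTM_HOST_INT_RESET,
 * deliver received data on command completion, report a full secure
 * processor command queue, then acknowledge the causes by writing them
 * back and signal TX completion to the mailbox core.
 */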
static irqreturn_t a37xx_mbox_irq_handler(int irq, void *data)
{
	struct mbox_chan *chan = data;
	struct a37xx_mbox *mbox = chan->con_priv;
	u32 reg;

	reg = readl(mbox->base + RWTM_HOST_INT_RESET);

	if (reg & SP_CMD_COMPLETE)
		a37xx_mbox_receive(chan);

	if (reg & (SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL))
		dev_err(mbox->dev, "Secure processor command queue full\n");

	writel(reg, mbox->base + RWTM_HOST_INT_RESET);
	if (reg)
		mbox_chan_txdone(chan, 0);

	return reg ? IRQ_HANDLED : IRQ_NONE;
}

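/*
 * Queue one command: check the FIFO status register (warn if the secure
 * processor is not ready, return -EBUSY if its command queue is already
 * full), then write the 16 arguments followed by the command register.
 */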
static int a37xx_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct a37xx_mbox *mbox = chan->con_priv;
	struct armada_37xx_rwtm_tx_msg *msg = data;
	int i;
	u32 reg;

	if (!data)
		return -EINVAL;

	reg = readl(mbox->base + RWTM_MBOX_FIFO_STATUS);
	if (!(reg & FIFO_STS_RDY))
		dev_warn(mbox->dev, "Secure processor not ready\n");

	if ((reg & FIFO_STS_CNTR_MASK) >= FIFO_STS_CNTR_MAX) {
		dev_err(mbox->dev, "Secure processor command queue full\n");
		return -EBUSY;
	}

	for (i = 0; i < 16; ++i)
		writel(msg->args[i], mbox->base + RWTM_MBOX_PARAM(i));
	writel(msg->command, mbox->base + RWTM_MBOX_COMMAND);

	return 0;
}

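/*
 * Channel startup: request the mailbox interrupt and enable IRQ generation
 * by clearing the command complete and queue full bits in the interrupt
 * mask register.
 */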
static int a37xx_mbox_startup(struct mbox_chan *chan)
{
	struct a37xx_mbox *mbox = chan->con_priv;
	u32 reg;
	int ret;

	ret = devm_request_irq(mbox->dev, mbox->irq, a37xx_mbox_irq_handler, 0,
			       DRIVER_NAME, chan);
	if (ret < 0) {
		dev_err(mbox->dev, "Cannot request irq\n");
		return ret;
	}

	/* enable IRQ generation */
	reg = readl(mbox->base + RWTM_HOST_INT_MASK);
	reg &= ~(SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL);
	writel(reg, mbox->base + RWTM_HOST_INT_MASK);

	return 0;
}

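/*
 * Channel shutdown: mask all three interrupt sources again and release the
 * interrupt line.
 */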
static void a37xx_mbox_shutdown(struct mbox_chan *chan)
{
	u32 reg;
	struct a37xx_mbox *mbox = chan->con_priv;

	/* disable interrupt generation */
	reg = readl(mbox->base + RWTM_HOST_INT_MASK);
	reg |= SP_CMD_COMPLETE | SP_CMD_QUEUE_FULL_ACCESS | SP_CMD_QUEUE_FULL;
	writel(reg, mbox->base + RWTM_HOST_INT_MASK);

	devm_free_irq(mbox->dev, mbox->irq, chan);
}

static const struct mbox_chan_ops a37xx_mbox_ops = {
	.send_data	= a37xx_mbox_send_data,
	.startup	= a37xx_mbox_startup,
	.shutdown	= a37xx_mbox_shutdown,
};

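/*
 * Probe: allocate driver data and the single channel, map the register
 * block, obtain the interrupt and register the mailbox controller with
 * IRQ-based TX-done signalling.
 */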
static int armada_37xx_mbox_probe(struct platform_device *pdev)
{
	struct a37xx_mbox *mbox;
	struct mbox_chan *chans;
	int ret;

	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate the single channel */
	chans = devm_kzalloc(&pdev->dev, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	mbox->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mbox->base))
		return PTR_ERR(mbox->base);

	mbox->irq = platform_get_irq(pdev, 0);
	if (mbox->irq < 0)
		return mbox->irq;

	mbox->dev = &pdev->dev;

	/* Hardware supports only one channel. */
	chans[0].con_priv = mbox;
	mbox->controller.dev = mbox->dev;
	mbox->controller.num_chans = 1;
	mbox->controller.chans = chans;
	mbox->controller.ops = &a37xx_mbox_ops;
	mbox->controller.txdone_irq = true;

	ret = devm_mbox_controller_register(mbox->dev, &mbox->controller);
	if (ret) {
		dev_err(&pdev->dev, "Could not register mailbox controller\n");
		return ret;
	}

	platform_set_drvdata(pdev, mbox);
	return ret;
}

static const struct of_device_id armada_37xx_mbox_match[] = {
	{ .compatible = "marvell,armada-3700-rwtm-mailbox" },
	{ },
};

MODULE_DEVICE_TABLE(of, armada_37xx_mbox_match);

static struct platform_driver armada_37xx_mbox_driver = {
	.probe	= armada_37xx_mbox_probe,
	.driver	= {
		.name = DRIVER_NAME,
		.of_match_table = armada_37xx_mbox_match,
	},
};

module_platform_driver(armada_37xx_mbox_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("rWTM BIU Mailbox driver for Armada 37xx");
MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");