// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell MMC/SD/SDIO driver
 *
 * Authors: Maen Suleiman, Nicolas Pitre
 * Copyright (C) 2008-2009 Marvell Ltd.
 */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/platform_device.h>
13*4882a593Smuzhiyun #include <linux/mbus.h>
14*4882a593Smuzhiyun #include <linux/delay.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/dma-mapping.h>
17*4882a593Smuzhiyun #include <linux/scatterlist.h>
18*4882a593Smuzhiyun #include <linux/irq.h>
19*4882a593Smuzhiyun #include <linux/clk.h>
20*4882a593Smuzhiyun #include <linux/of_irq.h>
21*4882a593Smuzhiyun #include <linux/mmc/host.h>
22*4882a593Smuzhiyun #include <linux/mmc/slot-gpio.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <linux/sizes.h>
25*4882a593Smuzhiyun #include <asm/unaligned.h>
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #include "mvsdio.h"
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #define DRIVER_NAME "mvsdio"
30*4882a593Smuzhiyun
/*
 * Module tunables: "maxfreq" caps the bus clock, "nodma" forces PIO
 * for all transfers.  Both default to 0 (no cap, DMA allowed).
 */
static int maxfreq;
static int nodma;
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun struct mvsd_host {
35*4882a593Smuzhiyun void __iomem *base;
36*4882a593Smuzhiyun struct mmc_request *mrq;
37*4882a593Smuzhiyun spinlock_t lock;
38*4882a593Smuzhiyun unsigned int xfer_mode;
39*4882a593Smuzhiyun unsigned int intr_en;
40*4882a593Smuzhiyun unsigned int ctrl;
41*4882a593Smuzhiyun unsigned int pio_size;
42*4882a593Smuzhiyun void *pio_ptr;
43*4882a593Smuzhiyun unsigned int sg_frags;
44*4882a593Smuzhiyun unsigned int ns_per_clk;
45*4882a593Smuzhiyun unsigned int clock;
46*4882a593Smuzhiyun unsigned int base_clock;
47*4882a593Smuzhiyun struct timer_list timer;
48*4882a593Smuzhiyun struct mmc_host *mmc;
49*4882a593Smuzhiyun struct device *dev;
50*4882a593Smuzhiyun struct clk *clk;
51*4882a593Smuzhiyun };
52*4882a593Smuzhiyun
/* Register accessors; both expect a local "iobase" pointing at the regs. */
#define mvsd_write(offs, val)	writel(val, iobase + (offs))
#define mvsd_read(offs)		readl(iobase + (offs))
55*4882a593Smuzhiyun
mvsd_setup_data(struct mvsd_host * host,struct mmc_data * data)56*4882a593Smuzhiyun static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
57*4882a593Smuzhiyun {
58*4882a593Smuzhiyun void __iomem *iobase = host->base;
59*4882a593Smuzhiyun unsigned int tmout;
60*4882a593Smuzhiyun int tmout_index;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun /*
63*4882a593Smuzhiyun * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
64*4882a593Smuzhiyun * register is sometimes not set before a while when some
65*4882a593Smuzhiyun * "unusual" data block sizes are used (such as with the SWITCH
66*4882a593Smuzhiyun * command), even despite the fact that the XFER_DONE interrupt
67*4882a593Smuzhiyun * was raised. And if another data transfer starts before
68*4882a593Smuzhiyun * this bit comes to good sense (which eventually happens by
69*4882a593Smuzhiyun * itself) then the new transfer simply fails with a timeout.
70*4882a593Smuzhiyun */
71*4882a593Smuzhiyun if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
72*4882a593Smuzhiyun unsigned long t = jiffies + HZ;
73*4882a593Smuzhiyun unsigned int hw_state, count = 0;
74*4882a593Smuzhiyun do {
75*4882a593Smuzhiyun hw_state = mvsd_read(MVSD_HW_STATE);
76*4882a593Smuzhiyun if (time_after(jiffies, t)) {
77*4882a593Smuzhiyun dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
78*4882a593Smuzhiyun break;
79*4882a593Smuzhiyun }
80*4882a593Smuzhiyun count++;
81*4882a593Smuzhiyun } while (!(hw_state & (1 << 13)));
82*4882a593Smuzhiyun dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
83*4882a593Smuzhiyun "(hw=0x%04x, count=%d, jiffies=%ld)\n",
84*4882a593Smuzhiyun hw_state, count, jiffies - (t - HZ));
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun /* If timeout=0 then maximum timeout index is used. */
88*4882a593Smuzhiyun tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
89*4882a593Smuzhiyun tmout += data->timeout_clks;
90*4882a593Smuzhiyun tmout_index = fls(tmout - 1) - 12;
91*4882a593Smuzhiyun if (tmout_index < 0)
92*4882a593Smuzhiyun tmout_index = 0;
93*4882a593Smuzhiyun if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
94*4882a593Smuzhiyun tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
97*4882a593Smuzhiyun (data->flags & MMC_DATA_READ) ? "read" : "write",
98*4882a593Smuzhiyun (u32)sg_virt(data->sg), data->blocks, data->blksz,
99*4882a593Smuzhiyun tmout, tmout_index);
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
102*4882a593Smuzhiyun host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
103*4882a593Smuzhiyun mvsd_write(MVSD_HOST_CTRL, host->ctrl);
104*4882a593Smuzhiyun mvsd_write(MVSD_BLK_COUNT, data->blocks);
105*4882a593Smuzhiyun mvsd_write(MVSD_BLK_SIZE, data->blksz);
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun if (nodma || (data->blksz | data->sg->offset) & 3 ||
108*4882a593Smuzhiyun ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
109*4882a593Smuzhiyun /*
110*4882a593Smuzhiyun * We cannot do DMA on a buffer which offset or size
111*4882a593Smuzhiyun * is not aligned on a 4-byte boundary.
112*4882a593Smuzhiyun *
113*4882a593Smuzhiyun * It also appears the host to card DMA can corrupt
114*4882a593Smuzhiyun * data when the buffer is not aligned on a 64 byte
115*4882a593Smuzhiyun * boundary.
116*4882a593Smuzhiyun */
117*4882a593Smuzhiyun host->pio_size = data->blocks * data->blksz;
118*4882a593Smuzhiyun host->pio_ptr = sg_virt(data->sg);
119*4882a593Smuzhiyun if (!nodma)
120*4882a593Smuzhiyun dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
121*4882a593Smuzhiyun host->pio_ptr, host->pio_size);
122*4882a593Smuzhiyun return 1;
123*4882a593Smuzhiyun } else {
124*4882a593Smuzhiyun dma_addr_t phys_addr;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun host->sg_frags = dma_map_sg(mmc_dev(host->mmc),
127*4882a593Smuzhiyun data->sg, data->sg_len,
128*4882a593Smuzhiyun mmc_get_dma_dir(data));
129*4882a593Smuzhiyun phys_addr = sg_dma_address(data->sg);
130*4882a593Smuzhiyun mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
131*4882a593Smuzhiyun mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
132*4882a593Smuzhiyun return 0;
133*4882a593Smuzhiyun }
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun
mvsd_request(struct mmc_host * mmc,struct mmc_request * mrq)136*4882a593Smuzhiyun static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun struct mvsd_host *host = mmc_priv(mmc);
139*4882a593Smuzhiyun void __iomem *iobase = host->base;
140*4882a593Smuzhiyun struct mmc_command *cmd = mrq->cmd;
141*4882a593Smuzhiyun u32 cmdreg = 0, xfer = 0, intr = 0;
142*4882a593Smuzhiyun unsigned long flags;
143*4882a593Smuzhiyun unsigned int timeout;
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun BUG_ON(host->mrq != NULL);
146*4882a593Smuzhiyun host->mrq = mrq;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
149*4882a593Smuzhiyun cmd->opcode, mvsd_read(MVSD_HW_STATE));
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun cmdreg = MVSD_CMD_INDEX(cmd->opcode);
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun if (cmd->flags & MMC_RSP_BUSY)
154*4882a593Smuzhiyun cmdreg |= MVSD_CMD_RSP_48BUSY;
155*4882a593Smuzhiyun else if (cmd->flags & MMC_RSP_136)
156*4882a593Smuzhiyun cmdreg |= MVSD_CMD_RSP_136;
157*4882a593Smuzhiyun else if (cmd->flags & MMC_RSP_PRESENT)
158*4882a593Smuzhiyun cmdreg |= MVSD_CMD_RSP_48;
159*4882a593Smuzhiyun else
160*4882a593Smuzhiyun cmdreg |= MVSD_CMD_RSP_NONE;
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun if (cmd->flags & MMC_RSP_CRC)
163*4882a593Smuzhiyun cmdreg |= MVSD_CMD_CHECK_CMDCRC;
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun if (cmd->flags & MMC_RSP_OPCODE)
166*4882a593Smuzhiyun cmdreg |= MVSD_CMD_INDX_CHECK;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun if (cmd->flags & MMC_RSP_PRESENT) {
169*4882a593Smuzhiyun cmdreg |= MVSD_UNEXPECTED_RESP;
170*4882a593Smuzhiyun intr |= MVSD_NOR_UNEXP_RSP;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun if (mrq->data) {
174*4882a593Smuzhiyun struct mmc_data *data = mrq->data;
175*4882a593Smuzhiyun int pio;
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
178*4882a593Smuzhiyun xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
179*4882a593Smuzhiyun if (data->flags & MMC_DATA_READ)
180*4882a593Smuzhiyun xfer |= MVSD_XFER_MODE_TO_HOST;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun pio = mvsd_setup_data(host, data);
183*4882a593Smuzhiyun if (pio) {
184*4882a593Smuzhiyun xfer |= MVSD_XFER_MODE_PIO;
185*4882a593Smuzhiyun /* PIO section of mvsd_irq has comments on those bits */
186*4882a593Smuzhiyun if (data->flags & MMC_DATA_WRITE)
187*4882a593Smuzhiyun intr |= MVSD_NOR_TX_AVAIL;
188*4882a593Smuzhiyun else if (host->pio_size > 32)
189*4882a593Smuzhiyun intr |= MVSD_NOR_RX_FIFO_8W;
190*4882a593Smuzhiyun else
191*4882a593Smuzhiyun intr |= MVSD_NOR_RX_READY;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if (data->stop) {
195*4882a593Smuzhiyun struct mmc_command *stop = data->stop;
196*4882a593Smuzhiyun u32 cmd12reg = 0;
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
199*4882a593Smuzhiyun mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16);
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun if (stop->flags & MMC_RSP_BUSY)
202*4882a593Smuzhiyun cmd12reg |= MVSD_AUTOCMD12_BUSY;
203*4882a593Smuzhiyun if (stop->flags & MMC_RSP_OPCODE)
204*4882a593Smuzhiyun cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
205*4882a593Smuzhiyun cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
206*4882a593Smuzhiyun mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun xfer |= MVSD_XFER_MODE_AUTO_CMD12;
209*4882a593Smuzhiyun intr |= MVSD_NOR_AUTOCMD12_DONE;
210*4882a593Smuzhiyun } else {
211*4882a593Smuzhiyun intr |= MVSD_NOR_XFER_DONE;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun } else {
214*4882a593Smuzhiyun intr |= MVSD_NOR_CMD_DONE;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
218*4882a593Smuzhiyun mvsd_write(MVSD_ARG_HI, cmd->arg >> 16);
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun spin_lock_irqsave(&host->lock, flags);
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
223*4882a593Smuzhiyun host->xfer_mode |= xfer;
224*4882a593Smuzhiyun mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
227*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
228*4882a593Smuzhiyun mvsd_write(MVSD_CMD, cmdreg);
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun host->intr_en &= MVSD_NOR_CARD_INT;
231*4882a593Smuzhiyun host->intr_en |= intr | MVSD_NOR_ERROR;
232*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
233*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_EN, 0xffff);
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun timeout = cmd->busy_timeout ? cmd->busy_timeout : 5000;
236*4882a593Smuzhiyun mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout));
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun spin_unlock_irqrestore(&host->lock, flags);
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun
/*
 * Collect the command response from the controller's RSP registers and
 * translate command-phase error bits into cmd->error.  The response is
 * stored in 16-bit chunks that must be reassembled and shifted into the
 * layout the MMC core expects.
 *
 * Returns err_status with the command-phase error bits consumed, so the
 * caller can detect leftover (unhandled) errors.
 */
static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
			   u32 err_status)
{
	void __iomem *iobase = host->base;

	if (cmd->flags & MMC_RSP_136) {
		/* 136-bit response spread over eight 16-bit registers. */
		unsigned int response[8], i;
		for (i = 0; i < 8; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] = ((response[0] & 0x03ff) << 22) |
			       ((response[1] & 0xffff) << 6) |
			       ((response[2] & 0xfc00) >> 10);
		cmd->resp[1] = ((response[2] & 0x03ff) << 22) |
			       ((response[3] & 0xffff) << 6) |
			       ((response[4] & 0xfc00) >> 10);
		cmd->resp[2] = ((response[4] & 0x03ff) << 22) |
			       ((response[5] & 0xffff) << 6) |
			       ((response[6] & 0xfc00) >> 10);
		cmd->resp[3] = ((response[6] & 0x03ff) << 22) |
			       ((response[7] & 0x3fff) << 8);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		/* 48-bit response in three 16-bit registers. */
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_RSP(i));
		cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
			       ((response[1] & 0xffff) << (14 - 8)) |
			       ((response[0] & 0x03ff) << (30 - 8));
		cmd->resp[1] = ((response[0] & 0xfc00) >> 10);
		cmd->resp[2] = 0;
		cmd->resp[3] = 0;
	}

	if (err_status & MVSD_ERR_CMD_TIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
				 MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
		cmd->error = -EILSEQ;
	}
	err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
			MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
			MVSD_ERR_CMD_STARTBIT);

	return err_status;
}
285*4882a593Smuzhiyun
/*
 * Finish the data phase of a request: release PIO state or unmap the
 * DMA scatterlist, translate data-phase error bits into data->error,
 * account transferred bytes, and pick up the auto-CMD12 response when a
 * stop command was issued.
 *
 * Returns err_status with the data-phase (and auto-CMD12) error bits
 * consumed.
 */
static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
			    u32 err_status)
{
	void __iomem *iobase = host->base;

	/* pio_ptr set means the transfer ran in PIO mode, not DMA. */
	if (host->pio_ptr) {
		host->pio_ptr = NULL;
		host->pio_size = 0;
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
			     mmc_get_dma_dir(data));
	}

	if (err_status & MVSD_ERR_DATA_TIMEOUT)
		data->error = -ETIMEDOUT;
	else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
		data->error = -EILSEQ;
	else if (err_status & MVSD_ERR_XFER_SIZE)
		data->error = -EBADE;
	err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
			MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);

	dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
		mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
	data->bytes_xfered =
		(data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
	/* We can't be sure about the last block when errors are detected */
	if (data->bytes_xfered && data->error)
		data->bytes_xfered -= data->blksz;

	/* Handle Auto cmd 12 response */
	if (data->stop) {
		unsigned int response[3], i;
		for (i = 0; i < 3; i++)
			response[i] = mvsd_read(MVSD_AUTO_RSP(i));
		data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
				      ((response[1] & 0xffff) << (14 - 8)) |
				      ((response[0] & 0x03ff) << (30 - 8));
		data->stop->resp[1] = ((response[0] & 0xfc00) >> 10);
		data->stop->resp[2] = 0;
		data->stop->resp[3] = 0;

		if (err_status & MVSD_ERR_AUTOCMD12) {
			u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
			dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
			if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
				data->stop->error = -ENOEXEC;
			else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
				data->stop->error = -ETIMEDOUT;
			else if (err_cmd12)
				data->stop->error = -EILSEQ;
			err_status &= ~MVSD_ERR_AUTOCMD12;
		}
	}

	return err_status;
}
343*4882a593Smuzhiyun
mvsd_irq(int irq,void * dev)344*4882a593Smuzhiyun static irqreturn_t mvsd_irq(int irq, void *dev)
345*4882a593Smuzhiyun {
346*4882a593Smuzhiyun struct mvsd_host *host = dev;
347*4882a593Smuzhiyun void __iomem *iobase = host->base;
348*4882a593Smuzhiyun u32 intr_status, intr_done_mask;
349*4882a593Smuzhiyun int irq_handled = 0;
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
352*4882a593Smuzhiyun dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
353*4882a593Smuzhiyun intr_status, mvsd_read(MVSD_NOR_INTR_EN),
354*4882a593Smuzhiyun mvsd_read(MVSD_HW_STATE));
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun /*
357*4882a593Smuzhiyun * It looks like, SDIO IP can issue one late, spurious irq
358*4882a593Smuzhiyun * although all irqs should be disabled. To work around this,
359*4882a593Smuzhiyun * bail out early, if we didn't expect any irqs to occur.
360*4882a593Smuzhiyun */
361*4882a593Smuzhiyun if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
362*4882a593Smuzhiyun dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
363*4882a593Smuzhiyun mvsd_read(MVSD_NOR_INTR_STATUS),
364*4882a593Smuzhiyun mvsd_read(MVSD_NOR_INTR_EN),
365*4882a593Smuzhiyun mvsd_read(MVSD_ERR_INTR_STATUS),
366*4882a593Smuzhiyun mvsd_read(MVSD_ERR_INTR_EN));
367*4882a593Smuzhiyun return IRQ_HANDLED;
368*4882a593Smuzhiyun }
369*4882a593Smuzhiyun
370*4882a593Smuzhiyun spin_lock(&host->lock);
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun /* PIO handling, if needed. Messy business... */
373*4882a593Smuzhiyun if (host->pio_size &&
374*4882a593Smuzhiyun (intr_status & host->intr_en &
375*4882a593Smuzhiyun (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
376*4882a593Smuzhiyun u16 *p = host->pio_ptr;
377*4882a593Smuzhiyun int s = host->pio_size;
378*4882a593Smuzhiyun while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
379*4882a593Smuzhiyun readsw(iobase + MVSD_FIFO, p, 16);
380*4882a593Smuzhiyun p += 16;
381*4882a593Smuzhiyun s -= 32;
382*4882a593Smuzhiyun intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
383*4882a593Smuzhiyun }
384*4882a593Smuzhiyun /*
385*4882a593Smuzhiyun * Normally we'd use < 32 here, but the RX_FIFO_8W bit
386*4882a593Smuzhiyun * doesn't appear to assert when there is exactly 32 bytes
387*4882a593Smuzhiyun * (8 words) left to fetch in a transfer.
388*4882a593Smuzhiyun */
389*4882a593Smuzhiyun if (s <= 32) {
390*4882a593Smuzhiyun while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
391*4882a593Smuzhiyun put_unaligned(mvsd_read(MVSD_FIFO), p++);
392*4882a593Smuzhiyun put_unaligned(mvsd_read(MVSD_FIFO), p++);
393*4882a593Smuzhiyun s -= 4;
394*4882a593Smuzhiyun intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
397*4882a593Smuzhiyun u16 val[2] = {0, 0};
398*4882a593Smuzhiyun val[0] = mvsd_read(MVSD_FIFO);
399*4882a593Smuzhiyun val[1] = mvsd_read(MVSD_FIFO);
400*4882a593Smuzhiyun memcpy(p, ((void *)&val) + 4 - s, s);
401*4882a593Smuzhiyun s = 0;
402*4882a593Smuzhiyun intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun if (s == 0) {
405*4882a593Smuzhiyun host->intr_en &=
406*4882a593Smuzhiyun ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
407*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
408*4882a593Smuzhiyun } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
409*4882a593Smuzhiyun host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
410*4882a593Smuzhiyun host->intr_en |= MVSD_NOR_RX_READY;
411*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
412*4882a593Smuzhiyun }
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
415*4882a593Smuzhiyun s, intr_status, mvsd_read(MVSD_HW_STATE));
416*4882a593Smuzhiyun host->pio_ptr = p;
417*4882a593Smuzhiyun host->pio_size = s;
418*4882a593Smuzhiyun irq_handled = 1;
419*4882a593Smuzhiyun } else if (host->pio_size &&
420*4882a593Smuzhiyun (intr_status & host->intr_en &
421*4882a593Smuzhiyun (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
422*4882a593Smuzhiyun u16 *p = host->pio_ptr;
423*4882a593Smuzhiyun int s = host->pio_size;
424*4882a593Smuzhiyun /*
425*4882a593Smuzhiyun * The TX_FIFO_8W bit is unreliable. When set, bursting
426*4882a593Smuzhiyun * 16 halfwords all at once in the FIFO drops data. Actually
427*4882a593Smuzhiyun * TX_AVAIL does go off after only one word is pushed even if
428*4882a593Smuzhiyun * TX_FIFO_8W remains set.
429*4882a593Smuzhiyun */
430*4882a593Smuzhiyun while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
431*4882a593Smuzhiyun mvsd_write(MVSD_FIFO, get_unaligned(p++));
432*4882a593Smuzhiyun mvsd_write(MVSD_FIFO, get_unaligned(p++));
433*4882a593Smuzhiyun s -= 4;
434*4882a593Smuzhiyun intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
435*4882a593Smuzhiyun }
436*4882a593Smuzhiyun if (s < 4) {
437*4882a593Smuzhiyun if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
438*4882a593Smuzhiyun u16 val[2] = {0, 0};
439*4882a593Smuzhiyun memcpy(((void *)&val) + 4 - s, p, s);
440*4882a593Smuzhiyun mvsd_write(MVSD_FIFO, val[0]);
441*4882a593Smuzhiyun mvsd_write(MVSD_FIFO, val[1]);
442*4882a593Smuzhiyun s = 0;
443*4882a593Smuzhiyun intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun if (s == 0) {
446*4882a593Smuzhiyun host->intr_en &=
447*4882a593Smuzhiyun ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
448*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
449*4882a593Smuzhiyun }
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
452*4882a593Smuzhiyun s, intr_status, mvsd_read(MVSD_HW_STATE));
453*4882a593Smuzhiyun host->pio_ptr = p;
454*4882a593Smuzhiyun host->pio_size = s;
455*4882a593Smuzhiyun irq_handled = 1;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);
459*4882a593Smuzhiyun
460*4882a593Smuzhiyun intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
461*4882a593Smuzhiyun MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
462*4882a593Smuzhiyun if (intr_status & host->intr_en & ~intr_done_mask) {
463*4882a593Smuzhiyun struct mmc_request *mrq = host->mrq;
464*4882a593Smuzhiyun struct mmc_command *cmd = mrq->cmd;
465*4882a593Smuzhiyun u32 err_status = 0;
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun del_timer(&host->timer);
468*4882a593Smuzhiyun host->mrq = NULL;
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun host->intr_en &= MVSD_NOR_CARD_INT;
471*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
472*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_EN, 0);
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun spin_unlock(&host->lock);
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun if (intr_status & MVSD_NOR_UNEXP_RSP) {
477*4882a593Smuzhiyun cmd->error = -EPROTO;
478*4882a593Smuzhiyun } else if (intr_status & MVSD_NOR_ERROR) {
479*4882a593Smuzhiyun err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
480*4882a593Smuzhiyun dev_dbg(host->dev, "err 0x%04x\n", err_status);
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun err_status = mvsd_finish_cmd(host, cmd, err_status);
484*4882a593Smuzhiyun if (mrq->data)
485*4882a593Smuzhiyun err_status = mvsd_finish_data(host, mrq->data, err_status);
486*4882a593Smuzhiyun if (err_status) {
487*4882a593Smuzhiyun dev_err(host->dev, "unhandled error status %#04x\n",
488*4882a593Smuzhiyun err_status);
489*4882a593Smuzhiyun cmd->error = -ENOMSG;
490*4882a593Smuzhiyun }
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun mmc_request_done(host->mmc, mrq);
493*4882a593Smuzhiyun irq_handled = 1;
494*4882a593Smuzhiyun } else
495*4882a593Smuzhiyun spin_unlock(&host->lock);
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun if (intr_status & MVSD_NOR_CARD_INT) {
498*4882a593Smuzhiyun mmc_signal_sdio_irq(host->mmc);
499*4882a593Smuzhiyun irq_handled = 1;
500*4882a593Smuzhiyun }
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun if (irq_handled)
503*4882a593Smuzhiyun return IRQ_HANDLED;
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
506*4882a593Smuzhiyun intr_status, host->intr_en, host->pio_size);
507*4882a593Smuzhiyun return IRQ_NONE;
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun
mvsd_timeout_timer(struct timer_list * t)510*4882a593Smuzhiyun static void mvsd_timeout_timer(struct timer_list *t)
511*4882a593Smuzhiyun {
512*4882a593Smuzhiyun struct mvsd_host *host = from_timer(host, t, timer);
513*4882a593Smuzhiyun void __iomem *iobase = host->base;
514*4882a593Smuzhiyun struct mmc_request *mrq;
515*4882a593Smuzhiyun unsigned long flags;
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun spin_lock_irqsave(&host->lock, flags);
518*4882a593Smuzhiyun mrq = host->mrq;
519*4882a593Smuzhiyun if (mrq) {
520*4882a593Smuzhiyun dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
521*4882a593Smuzhiyun dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
522*4882a593Smuzhiyun mvsd_read(MVSD_HW_STATE),
523*4882a593Smuzhiyun mvsd_read(MVSD_NOR_INTR_STATUS),
524*4882a593Smuzhiyun mvsd_read(MVSD_NOR_INTR_EN));
525*4882a593Smuzhiyun
526*4882a593Smuzhiyun host->mrq = NULL;
527*4882a593Smuzhiyun
528*4882a593Smuzhiyun mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
531*4882a593Smuzhiyun mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun host->intr_en &= MVSD_NOR_CARD_INT;
534*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
535*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_EN, 0);
536*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun mrq->cmd->error = -ETIMEDOUT;
539*4882a593Smuzhiyun mvsd_finish_cmd(host, mrq->cmd, 0);
540*4882a593Smuzhiyun if (mrq->data) {
541*4882a593Smuzhiyun mrq->data->error = -ETIMEDOUT;
542*4882a593Smuzhiyun mvsd_finish_data(host, mrq->data, 0);
543*4882a593Smuzhiyun }
544*4882a593Smuzhiyun }
545*4882a593Smuzhiyun spin_unlock_irqrestore(&host->lock, flags);
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun if (mrq)
548*4882a593Smuzhiyun mmc_request_done(host->mmc, mrq);
549*4882a593Smuzhiyun }
550*4882a593Smuzhiyun
mvsd_enable_sdio_irq(struct mmc_host * mmc,int enable)551*4882a593Smuzhiyun static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun struct mvsd_host *host = mmc_priv(mmc);
554*4882a593Smuzhiyun void __iomem *iobase = host->base;
555*4882a593Smuzhiyun unsigned long flags;
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun spin_lock_irqsave(&host->lock, flags);
558*4882a593Smuzhiyun if (enable) {
559*4882a593Smuzhiyun host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
560*4882a593Smuzhiyun host->intr_en |= MVSD_NOR_CARD_INT;
561*4882a593Smuzhiyun } else {
562*4882a593Smuzhiyun host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
563*4882a593Smuzhiyun host->intr_en &= ~MVSD_NOR_CARD_INT;
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
566*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
567*4882a593Smuzhiyun spin_unlock_irqrestore(&host->lock, flags);
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun
mvsd_power_up(struct mvsd_host * host)570*4882a593Smuzhiyun static void mvsd_power_up(struct mvsd_host *host)
571*4882a593Smuzhiyun {
572*4882a593Smuzhiyun void __iomem *iobase = host->base;
573*4882a593Smuzhiyun dev_dbg(host->dev, "power up\n");
574*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, 0);
575*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_EN, 0);
576*4882a593Smuzhiyun mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
577*4882a593Smuzhiyun mvsd_write(MVSD_XFER_MODE, 0);
578*4882a593Smuzhiyun mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
579*4882a593Smuzhiyun mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
580*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
581*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun
mvsd_power_down(struct mvsd_host * host)584*4882a593Smuzhiyun static void mvsd_power_down(struct mvsd_host *host)
585*4882a593Smuzhiyun {
586*4882a593Smuzhiyun void __iomem *iobase = host->base;
587*4882a593Smuzhiyun dev_dbg(host->dev, "power down\n");
588*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_EN, 0);
589*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_EN, 0);
590*4882a593Smuzhiyun mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
591*4882a593Smuzhiyun mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
592*4882a593Smuzhiyun mvsd_write(MVSD_NOR_STATUS_EN, 0);
593*4882a593Smuzhiyun mvsd_write(MVSD_ERR_STATUS_EN, 0);
594*4882a593Smuzhiyun mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
595*4882a593Smuzhiyun mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
596*4882a593Smuzhiyun }
597*4882a593Smuzhiyun
mvsd_set_ios(struct mmc_host * mmc,struct mmc_ios * ios)598*4882a593Smuzhiyun static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun struct mvsd_host *host = mmc_priv(mmc);
601*4882a593Smuzhiyun void __iomem *iobase = host->base;
602*4882a593Smuzhiyun u32 ctrl_reg = 0;
603*4882a593Smuzhiyun
604*4882a593Smuzhiyun if (ios->power_mode == MMC_POWER_UP)
605*4882a593Smuzhiyun mvsd_power_up(host);
606*4882a593Smuzhiyun
607*4882a593Smuzhiyun if (ios->clock == 0) {
608*4882a593Smuzhiyun mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
609*4882a593Smuzhiyun mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
610*4882a593Smuzhiyun host->clock = 0;
611*4882a593Smuzhiyun dev_dbg(host->dev, "clock off\n");
612*4882a593Smuzhiyun } else if (ios->clock != host->clock) {
613*4882a593Smuzhiyun u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
614*4882a593Smuzhiyun if (m > MVSD_BASE_DIV_MAX)
615*4882a593Smuzhiyun m = MVSD_BASE_DIV_MAX;
616*4882a593Smuzhiyun mvsd_write(MVSD_CLK_DIV, m);
617*4882a593Smuzhiyun host->clock = ios->clock;
618*4882a593Smuzhiyun host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
619*4882a593Smuzhiyun dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
620*4882a593Smuzhiyun ios->clock, host->base_clock / (m+1), m);
621*4882a593Smuzhiyun }
622*4882a593Smuzhiyun
623*4882a593Smuzhiyun /* default transfer mode */
624*4882a593Smuzhiyun ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
625*4882a593Smuzhiyun ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun /* default to maximum timeout */
628*4882a593Smuzhiyun ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
629*4882a593Smuzhiyun ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;
630*4882a593Smuzhiyun
631*4882a593Smuzhiyun if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
632*4882a593Smuzhiyun ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;
633*4882a593Smuzhiyun
634*4882a593Smuzhiyun if (ios->bus_width == MMC_BUS_WIDTH_4)
635*4882a593Smuzhiyun ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun /*
638*4882a593Smuzhiyun * The HI_SPEED_EN bit is causing trouble with many (but not all)
639*4882a593Smuzhiyun * high speed SD, SDHC and SDIO cards. Not enabling that bit
640*4882a593Smuzhiyun * makes all cards work. So let's just ignore that bit for now
641*4882a593Smuzhiyun * and revisit this issue if problems for not enabling this bit
642*4882a593Smuzhiyun * are ever reported.
643*4882a593Smuzhiyun */
644*4882a593Smuzhiyun #if 0
645*4882a593Smuzhiyun if (ios->timing == MMC_TIMING_MMC_HS ||
646*4882a593Smuzhiyun ios->timing == MMC_TIMING_SD_HS)
647*4882a593Smuzhiyun ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
648*4882a593Smuzhiyun #endif
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun host->ctrl = ctrl_reg;
651*4882a593Smuzhiyun mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
652*4882a593Smuzhiyun dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
653*4882a593Smuzhiyun (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
654*4882a593Smuzhiyun "push-pull" : "open-drain",
655*4882a593Smuzhiyun (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
656*4882a593Smuzhiyun "4bit-width" : "1bit-width",
657*4882a593Smuzhiyun (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
658*4882a593Smuzhiyun "high-speed" : "");
659*4882a593Smuzhiyun
660*4882a593Smuzhiyun if (ios->power_mode == MMC_POWER_OFF)
661*4882a593Smuzhiyun mvsd_power_down(host);
662*4882a593Smuzhiyun }
663*4882a593Smuzhiyun
/* Host operations handed to the MMC core at registration time. */
static const struct mmc_host_ops mvsd_ops = {
	.request = mvsd_request,
	.get_ro = mmc_gpio_get_ro,	/* write-protect state read via slot GPIO */
	.set_ios = mvsd_set_ios,
	.enable_sdio_irq = mvsd_enable_sdio_irq,
};
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun static void
mv_conf_mbus_windows(struct mvsd_host * host,const struct mbus_dram_target_info * dram)672*4882a593Smuzhiyun mv_conf_mbus_windows(struct mvsd_host *host,
673*4882a593Smuzhiyun const struct mbus_dram_target_info *dram)
674*4882a593Smuzhiyun {
675*4882a593Smuzhiyun void __iomem *iobase = host->base;
676*4882a593Smuzhiyun int i;
677*4882a593Smuzhiyun
678*4882a593Smuzhiyun for (i = 0; i < 4; i++) {
679*4882a593Smuzhiyun writel(0, iobase + MVSD_WINDOW_CTRL(i));
680*4882a593Smuzhiyun writel(0, iobase + MVSD_WINDOW_BASE(i));
681*4882a593Smuzhiyun }
682*4882a593Smuzhiyun
683*4882a593Smuzhiyun for (i = 0; i < dram->num_cs; i++) {
684*4882a593Smuzhiyun const struct mbus_dram_window *cs = dram->cs + i;
685*4882a593Smuzhiyun writel(((cs->size - 1) & 0xffff0000) |
686*4882a593Smuzhiyun (cs->mbus_attr << 8) |
687*4882a593Smuzhiyun (dram->mbus_dram_target_id << 4) | 1,
688*4882a593Smuzhiyun iobase + MVSD_WINDOW_CTRL(i));
689*4882a593Smuzhiyun writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun
mvsd_probe(struct platform_device * pdev)693*4882a593Smuzhiyun static int mvsd_probe(struct platform_device *pdev)
694*4882a593Smuzhiyun {
695*4882a593Smuzhiyun struct device_node *np = pdev->dev.of_node;
696*4882a593Smuzhiyun struct mmc_host *mmc = NULL;
697*4882a593Smuzhiyun struct mvsd_host *host = NULL;
698*4882a593Smuzhiyun const struct mbus_dram_target_info *dram;
699*4882a593Smuzhiyun int ret, irq;
700*4882a593Smuzhiyun
701*4882a593Smuzhiyun if (!np) {
702*4882a593Smuzhiyun dev_err(&pdev->dev, "no DT node\n");
703*4882a593Smuzhiyun return -ENODEV;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun irq = platform_get_irq(pdev, 0);
706*4882a593Smuzhiyun if (irq < 0)
707*4882a593Smuzhiyun return -ENXIO;
708*4882a593Smuzhiyun
709*4882a593Smuzhiyun mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
710*4882a593Smuzhiyun if (!mmc) {
711*4882a593Smuzhiyun ret = -ENOMEM;
712*4882a593Smuzhiyun goto out;
713*4882a593Smuzhiyun }
714*4882a593Smuzhiyun
715*4882a593Smuzhiyun host = mmc_priv(mmc);
716*4882a593Smuzhiyun host->mmc = mmc;
717*4882a593Smuzhiyun host->dev = &pdev->dev;
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun /*
720*4882a593Smuzhiyun * Some non-DT platforms do not pass a clock, and the clock
721*4882a593Smuzhiyun * frequency is passed through platform_data. On DT platforms,
722*4882a593Smuzhiyun * a clock must always be passed, even if there is no gatable
723*4882a593Smuzhiyun * clock associated to the SDIO interface (it can simply be a
724*4882a593Smuzhiyun * fixed rate clock).
725*4882a593Smuzhiyun */
726*4882a593Smuzhiyun host->clk = devm_clk_get(&pdev->dev, NULL);
727*4882a593Smuzhiyun if (IS_ERR(host->clk)) {
728*4882a593Smuzhiyun dev_err(&pdev->dev, "no clock associated\n");
729*4882a593Smuzhiyun ret = -EINVAL;
730*4882a593Smuzhiyun goto out;
731*4882a593Smuzhiyun }
732*4882a593Smuzhiyun clk_prepare_enable(host->clk);
733*4882a593Smuzhiyun
734*4882a593Smuzhiyun mmc->ops = &mvsd_ops;
735*4882a593Smuzhiyun
736*4882a593Smuzhiyun mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
739*4882a593Smuzhiyun mmc->f_max = MVSD_CLOCKRATE_MAX;
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun mmc->max_blk_size = 2048;
742*4882a593Smuzhiyun mmc->max_blk_count = 65535;
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun mmc->max_segs = 1;
745*4882a593Smuzhiyun mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
746*4882a593Smuzhiyun mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun host->base_clock = clk_get_rate(host->clk) / 2;
749*4882a593Smuzhiyun ret = mmc_of_parse(mmc);
750*4882a593Smuzhiyun if (ret < 0)
751*4882a593Smuzhiyun goto out;
752*4882a593Smuzhiyun if (maxfreq)
753*4882a593Smuzhiyun mmc->f_max = maxfreq;
754*4882a593Smuzhiyun
755*4882a593Smuzhiyun spin_lock_init(&host->lock);
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun host->base = devm_platform_ioremap_resource(pdev, 0);
758*4882a593Smuzhiyun if (IS_ERR(host->base)) {
759*4882a593Smuzhiyun ret = PTR_ERR(host->base);
760*4882a593Smuzhiyun goto out;
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun /* (Re-)program MBUS remapping windows if we are asked to. */
764*4882a593Smuzhiyun dram = mv_mbus_dram_info();
765*4882a593Smuzhiyun if (dram)
766*4882a593Smuzhiyun mv_conf_mbus_windows(host, dram);
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun mvsd_power_down(host);
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
771*4882a593Smuzhiyun if (ret) {
772*4882a593Smuzhiyun dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
773*4882a593Smuzhiyun goto out;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun
776*4882a593Smuzhiyun timer_setup(&host->timer, mvsd_timeout_timer, 0);
777*4882a593Smuzhiyun platform_set_drvdata(pdev, mmc);
778*4882a593Smuzhiyun ret = mmc_add_host(mmc);
779*4882a593Smuzhiyun if (ret)
780*4882a593Smuzhiyun goto out;
781*4882a593Smuzhiyun
782*4882a593Smuzhiyun if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
783*4882a593Smuzhiyun dev_dbg(&pdev->dev, "using GPIO for card detection\n");
784*4882a593Smuzhiyun else
785*4882a593Smuzhiyun dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun return 0;
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun out:
790*4882a593Smuzhiyun if (mmc) {
791*4882a593Smuzhiyun if (!IS_ERR(host->clk))
792*4882a593Smuzhiyun clk_disable_unprepare(host->clk);
793*4882a593Smuzhiyun mmc_free_host(mmc);
794*4882a593Smuzhiyun }
795*4882a593Smuzhiyun
796*4882a593Smuzhiyun return ret;
797*4882a593Smuzhiyun }
798*4882a593Smuzhiyun
mvsd_remove(struct platform_device * pdev)799*4882a593Smuzhiyun static int mvsd_remove(struct platform_device *pdev)
800*4882a593Smuzhiyun {
801*4882a593Smuzhiyun struct mmc_host *mmc = platform_get_drvdata(pdev);
802*4882a593Smuzhiyun
803*4882a593Smuzhiyun struct mvsd_host *host = mmc_priv(mmc);
804*4882a593Smuzhiyun
805*4882a593Smuzhiyun mmc_remove_host(mmc);
806*4882a593Smuzhiyun del_timer_sync(&host->timer);
807*4882a593Smuzhiyun mvsd_power_down(host);
808*4882a593Smuzhiyun
809*4882a593Smuzhiyun if (!IS_ERR(host->clk))
810*4882a593Smuzhiyun clk_disable_unprepare(host->clk);
811*4882a593Smuzhiyun mmc_free_host(mmc);
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun return 0;
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun
/* Device-tree match table; exported for module autoloading. */
static const struct of_device_id mvsdio_dt_ids[] = {
	{ .compatible = "marvell,orion-sdio" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
821*4882a593Smuzhiyun
/* Platform driver glue: binds via the DT table above. */
static struct platform_driver mvsd_driver = {
	.probe = mvsd_probe,
	.remove = mvsd_remove,
	.driver = {
		.name = DRIVER_NAME,
		/* allow probing asynchronously alongside other drivers */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = mvsdio_dt_ids,
	},
};

module_platform_driver(mvsd_driver);
833*4882a593Smuzhiyun
/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);	/* perm 0: set at load time only */

/* force PIO transfers all the time */
module_param(nodma, int, 0);	/* perm 0: set at load time only */

MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mvsdio");
844