// SPDX-License-Identifier: GPL-2.0-only
/*
 *  WM8505/WM8650 SD/MMC Host Controller
 *
 *  Copyright (C) 2010 Tony Prisk
 *  Copyright (C) 2008 WonderMedia Technologies, Inc.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/interrupt.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>


#define DRIVER_NAME "wmt-sdhc"


/* MMC/SD controller registers */
#define SDMMC_CTLR			0x00
#define SDMMC_CMD			0x01
#define SDMMC_RSPTYPE			0x02
#define SDMMC_ARG			0x04
#define SDMMC_BUSMODE			0x08
#define SDMMC_BLKLEN			0x0C
#define SDMMC_BLKCNT			0x0E
#define SDMMC_RSP			0x10
#define SDMMC_CBCR			0x20
#define SDMMC_INTMASK0			0x24
#define SDMMC_INTMASK1			0x25
#define SDMMC_STS0			0x28
#define SDMMC_STS1			0x29
#define SDMMC_STS2			0x2A
#define SDMMC_STS3			0x2B
#define SDMMC_RSPTIMEOUT		0x2C
#define SDMMC_CLK			0x30	/* VT8500 only */
#define SDMMC_EXTCTRL			0x34
#define SDMMC_SBLKLEN			0x38
#define SDMMC_DMATIMEOUT		0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START			0x01
#define CTLR_CMD_WRITE			0x04
#define CTLR_FIFO_RESET			0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE			0x01
#define BM_FOURBIT_MODE			0x02
#define BM_EIGHTBIT_MODE		0x04
#define BM_SD_OFF			0x10
#define BM_SPI_CS			0x20
#define BM_SD_POWER			0x40
#define BM_SOFT_RESET			0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT		0x0800
#define BLKL_CD_POL_HIGH		0x1000
#define BLKL_GPI_CD			0x2000
#define BLKL_DATA3_CD			0x4000
#define BLKL_INT_ENABLE			0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT		0x02
#define STS0_CD_DATA3			0x04
#define STS0_CD_GPI			0x08
#define STS0_MBLK_DONE			0x10
#define STS0_BLK_DONE			0x20
#define STS0_CARD_DETECT		0x40
#define STS0_DEVICE_INS			0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT			0x01
#define STS1_CMDRSP_DONE		0x02
#define STS1_RSP_TIMEOUT		0x04
#define STS1_AUTOSTOP_DONE		0x08
#define STS1_DATA_TIMEOUT		0x10
#define STS1_RSP_CRC_ERR		0x20
#define STS1_RCRC_ERR			0x40
#define STS1_WCRC_ERR			0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY		0x10
#define STS2_DATARSP_BUSY		0x20
#define STS2_DIS_FORCECLK		0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT			0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR			0x100
#define SDDMA_IER			0x104
#define SDDMA_ISR			0x108
#define SDDMA_DESPR			0x10C
#define SDDMA_RBR			0x110
#define SDDMA_DAR			0x114
#define SDDMA_BAR			0x118
#define SDDMA_CPR			0x11C
#define SDDMA_CCR			0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN			0x00000001
#define DMA_GCR_SOFT_RESET		0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN			0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS			0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT			0x40000000
#define DMA_RBR_END			0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN			0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS		0x00000000
#define DMA_CCR_EVT_UNDERRUN		0x00000001
#define DMA_CCR_EVT_OVERRUN		0x00000002
#define DMA_CCR_EVT_DESP_READ		0x00000003
#define DMA_CCR_EVT_DATA_RW		0x00000004
#define DMA_CCR_EVT_EARLY_END		0x00000005
#define DMA_CCR_EVT_SUCCESS		0x0000000F

#define PDMA_READ			0x00
#define PDMA_WRITE			0x01

#define WMT_SD_POWER_OFF		0
#define WMT_SD_POWER_ON			1

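/*
 * In-memory descriptor consumed by the SD DMA engine. The chain base is
 * programmed into SDDMA_DESPR; flags carries the per-descriptor byte count
 * in its low 16 bits plus format/end bits (values matching DMA_RBR_FORMAT
 * and DMA_RBR_END), data_buffer_addr is the DMA address of the data and
 * branch_addr points at the next descriptor in the chain.
 */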
struct wmt_dma_descriptor {
	u32 flags;
	u32 data_buffer_addr;
	u32 branch_addr;
	u32 reserved1;
};

struct wmt_mci_caps {
	unsigned int	f_min;
	unsigned int	f_max;
	u32		ocr_avail;
	u32		caps;
	u32		max_seg_size;
	u32		max_segs;
	u32		max_blk_size;
};

struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;

	int irq_regular;
	int irq_dma;

	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;
	struct mmc_command *cmd;

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;
	u8 cd_inverted;
};

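/*
 * Switch the SD bus power by toggling the BM_SD_OFF bit in SDMMC_BUSMODE.
 * The "sdon-inverted" DT property (priv->power_inverted) inverts the
 * polarity of the enable argument.
 */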
static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

	if (enable ^ priv->power_inverted)
		reg_tmp &= ~BM_SD_OFF;
	else
		reg_tmp |= BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}

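/*
 * Read the response bytes from the SDMMC_RSP register window into
 * priv->cmd->resp[], assembling four 32-bit words and byte-swapping each
 * with cpu_to_be32().
 */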
static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1*4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}

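/*
 * Load the command index, argument and response type into the controller
 * and prepare it for the transfer: reset the response FIFO, make sure card
 * power is on, clear all status bits and program the command type into the
 * upper nibble of SDMMC_CTLR. The command is not started here;
 * wmt_mci_start_command() sets CTLR_CMD_START afterwards.
 */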
static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}

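/*
 * Finish a data request once both the command and DMA completions have
 * fired: update bytes_xfered, unmap the scatterlist, and either complete
 * the request or, for multi-block transfers with a stop command, issue the
 * stop command and let the regular ISR finish the request when its
 * response arrives.
 */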
static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}

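/*
 * DMA interrupt handler: read the event status from SDDMA_CCR, flag a data
 * error on anything other than DMA_CCR_EVT_SUCCESS, signal the DMA
 * completion and, if the command interrupt has already completed, finish
 * the data request here.
 */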
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;

	int status;

	priv = (struct wmt_mci_priv *)data;

	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * if the command (regular) interrupt has already
			 * completed, finish off the request otherwise we wait
			 * for the command interrupt and finish from there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}

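/*
 * Controller ("regular") interrupt handler: handles card insertion events,
 * command/response completion and response/data timeouts. Non-data and
 * stop commands are completed directly; for data commands the command
 * completion is signalled and the request is finished once the DMA
 * completion has also fired. Handled status bits are written back to
 * acknowledge them.
 */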
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}

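/*
 * Put the controller into a known state: soft-reset it, reset the response
 * FIFO, enable GPI-based card detect, clear and unmask the interrupts the
 * driver uses, set the DMA timeout, enable automatic clock freezing and
 * set a default bus clock of 400 kHz.
 */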
static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400Khz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

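/*
 * Soft-reset and re-enable the SD DMA engine. Returns 0 when the enable
 * bit sticks, 1 otherwise (the caller currently ignores the return value).
 */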
static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}

static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

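/*
 * Arm the DMA engine for one transfer: enable its interrupt, point
 * SDDMA_DESPR at the descriptor chain and set the transfer direction in
 * SDDMA_CCR (memory to card for PDMA_WRITE, card to memory otherwise).
 * The channel is started later by wmt_dma_start().
 */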
static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}

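/*
 * mmc_host_ops .request handler. Non-data commands are written to the
 * controller and completed from the regular ISR. Data commands map the
 * scatterlist, build one DMA descriptor per block, configure and start the
 * DMA channel, then issue the command; completion is coordinated between
 * the regular and DMA ISRs via the comp_cmd/comp_dma completions.
 */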
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure
	 * This is required on multi-block requests to pass the pointer to the
	 * stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg)+offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

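/*
 * mmc_host_ops .set_ios handler: resets the controller and switches card
 * power on MMC_POWER_UP, powers it off on MMC_POWER_OFF, updates the bus
 * clock via the sdmmc clock, and programs the 1/4/8-bit bus width in
 * SDMMC_BUSMODE/SDMMC_EXTCTRL.
 */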
static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 busmode, extctrl;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

	busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
	extctrl &= ~EXT_EIGHTBIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		busmode |= BM_EIGHTBIT_MODE;
		extctrl |= EXT_EIGHTBIT;
		break;
	case MMC_BUS_WIDTH_4:
		busmode |= BM_FOURBIT_MODE;
		break;
	case MMC_BUS_WIDTH_1:
		break;
	}

	writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}

static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

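/*
 * mmc_host_ops .get_cd handler: report card presence from the STS0_CD_GPI
 * bit, with the polarity optionally inverted by the "cd-inverted" DT
 * property (priv->cd_inverted).
 */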
static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static const struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

static const struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};

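/*
 * Probe: look up the controller capabilities from the OF match data, map
 * the register space, request the regular and DMA interrupts, allocate the
 * coherent DMA descriptor buffer, enable the sdmmc clock, reset the
 * hardware and register the mmc_host.
 */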
static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(wmt_mci_dt_ids, &pdev->dev);
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	wmt_caps = of_id->data;

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

	mmc->max_req_size = (16*512*mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	if (of_get_property(np, "sdon-inverted", NULL))
		priv->power_inverted = 1;
	if (of_get_property(np, "cd-inverted", NULL))
		priv->cd_inverted = 1;

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}

	priv->irq_regular = regular_irq;
	priv->irq_dma = dma_irq;

	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register regular IRQ fail\n");
		goto fail3;
	}

	ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register DMA IRQ fail\n");
		goto fail4;
	}

	/* alloc some DMA buffers for descriptors/transfers */
	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
						   mmc->max_blk_count * 16,
						   &priv->dma_desc_device_addr,
						   GFP_KERNEL);
	if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "DMA alloc fail\n");
		ret = -EPERM;
		goto fail5;
	}

	platform_set_drvdata(pdev, mmc);

	priv->clk_sdmmc = of_clk_get(np, 0);
	if (IS_ERR(priv->clk_sdmmc)) {
		dev_err(&pdev->dev, "Error getting clock\n");
		ret = PTR_ERR(priv->clk_sdmmc);
		goto fail5_and_a_half;
	}

	ret = clk_prepare_enable(priv->clk_sdmmc);
	if (ret)
		goto fail6;

	/* configure the controller to a known 'ready' state */
	wmt_reset_hardware(mmc);

	mmc_add_host(mmc);

	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

	return 0;
fail6:
	clk_put(priv->clk_sdmmc);
fail5_and_a_half:
	dma_free_coherent(&pdev->dev, mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);
fail5:
	free_irq(dma_irq, priv);
fail4:
	free_irq(regular_irq, priv);
fail3:
	iounmap(priv->sdmmc_base);
fail2:
	mmc_free_host(mmc);
fail1:
	return ret;
}

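/*
 * Remove: soft-reset the controller, disable the card-detect machinery in
 * SDMMC_BLKLEN, free the DMA descriptor buffer, unregister the host and
 * release the IRQs, mapping, clock and memory region acquired in probe.
 */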
static int wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct resource *res;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}

#ifdef CONFIG_PM
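/*
 * System suspend/resume hooks (wired into wmt_mci_pm below): suspend
 * soft-resets the controller, masks the card-detect bits in SDMMC_BLKLEN,
 * clears status and gates the sdmmc clock; resume re-enables the clock and
 * restores the card-detect interrupt configuration.
 */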
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
	       SDMMC_BUSMODE);

	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	clk_disable(priv->clk_sdmmc);
	return 0;
}

static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct mmc_host *mmc = dev_get_drvdata(dev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);

	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend        = wmt_mci_suspend,
	.resume         = wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);