xref: /OK3568_Linux_fs/kernel/drivers/mmc/host/sh_mmcif.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 */

/*
 * The MMCIF driver processes MMC requests asynchronously, as required by
 * the Linux MMC API.
 *
 * The MMCIF driver handles MMC requests in up to 3 stages: command, optional
 * data, and optional stop. To achieve asynchronous processing each of these
 * stages is split into two halves: a top and a bottom half. The top half
 * initialises the hardware, installs a timeout handler to catch completion
 * timeouts, and returns. For the command stage this immediately returns
 * control to the caller, leaving all further processing to run asynchronously.
 * All further request processing is performed by the bottom halves.
 *
 * The bottom half consists of a "hard" IRQ handler, an IRQ handler thread,
 * a DMA completion callback (if DMA is used), a timeout work, and
 * request- and stage-specific handler methods.
 *
 * Each bottom half run begins with either a hardware interrupt, a DMA callback
 * invocation, or a timeout work run. On an error or on successful completion,
 * the MMC core is informed and request processing is finished. If processing
 * has to continue, i.e., if data has to be read from or written to the card,
 * or if a stop command has to be sent, the next top half is called, which
 * performs the necessary hardware handling and reschedules the timeout work.
 * This returns the driver state machine to the bottom-half waiting state.
 */
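
/*
 * Request lifecycle, summarising the comment above (illustrative sketch,
 * not an additional interface):
 *
 *   mmc core              top half                 bottom half
 *   --------              --------                 -----------
 *   .request() ---------> sh_mmcif_start_cmd()
 *                         program HW, arm timeout
 *                         work, return
 *                                                  IRQ / DMA callback /
 *                                                  timeout work runs:
 *                                                    error or done ->
 *                                                      mmc_request_done()
 *                                                    more to do ->
 *                                                      next top half,
 *                                                      re-armed timeout
 */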

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME	"sh_mmcif"

/* CE_CMD_SET */
#define CMD_MASK		0x3f000000
#define CMD_SET_RTYP_NO		((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B		((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B	((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY		(1 << 21) /* R1b */
#define CMD_SET_CCSEN		(1 << 20)
#define CMD_SET_WDAT		(1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN		(1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE		(1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN		(1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX	((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS	((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO	((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C		((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS	((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL	((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C		(1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE		(1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT		(1 << 7) /* 1: transmission bit "Low" */
#define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */
#define CMD_SET_CCSH		(1 << 5)
#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */
#define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */
#define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */
#define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */

/* CE_CMD_CTRL */
#define CMD_CTRL_BREAK		(1 << 0)

/* CE_BLOCK_SET */
#define BLOCK_SIZE_MASK		0x0000ffff

/* CE_INT */
#define INT_CCSDE		(1 << 29)
#define INT_CMD12DRE		(1 << 26)
#define INT_CMD12RBE		(1 << 25)
#define INT_CMD12CRE		(1 << 24)
#define INT_DTRANE		(1 << 23)
#define INT_BUFRE		(1 << 22)
#define INT_BUFWEN		(1 << 21)
#define INT_BUFREN		(1 << 20)
#define INT_CCSRCV		(1 << 19)
#define INT_RBSYE		(1 << 17)
#define INT_CRSPE		(1 << 16)
#define INT_CMDVIO		(1 << 15)
#define INT_BUFVIO		(1 << 14)
#define INT_WDATERR		(1 << 11)
#define INT_RDATERR		(1 << 10)
#define INT_RIDXERR		(1 << 9)
#define INT_RSPERR		(1 << 8)
#define INT_CCSTO		(1 << 5)
#define INT_CRCSTO		(1 << 4)
#define INT_WDATTO		(1 << 3)
#define INT_RDATTO		(1 << 2)
#define INT_RBSYTO		(1 << 1)
#define INT_RSPTO		(1 << 0)
#define INT_ERR_STS		(INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
				 INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \
				 INT_RDATTO | INT_RBSYTO | INT_RSPTO)

#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \
				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \
				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE)

#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE)

/* CE_INT_MASK */
#define MASK_ALL		0x00000000
#define MASK_MCCSDE		(1 << 29)
#define MASK_MCMD12DRE		(1 << 26)
#define MASK_MCMD12RBE		(1 << 25)
#define MASK_MCMD12CRE		(1 << 24)
#define MASK_MDTRANE		(1 << 23)
#define MASK_MBUFRE		(1 << 22)
#define MASK_MBUFWEN		(1 << 21)
#define MASK_MBUFREN		(1 << 20)
#define MASK_MCCSRCV		(1 << 19)
#define MASK_MRBSYE		(1 << 17)
#define MASK_MCRSPE		(1 << 16)
#define MASK_MCMDVIO		(1 << 15)
#define MASK_MBUFVIO		(1 << 14)
#define MASK_MWDATERR		(1 << 11)
#define MASK_MRDATERR		(1 << 10)
#define MASK_MRIDXERR		(1 << 9)
#define MASK_MRSPERR		(1 << 8)
#define MASK_MCCSTO		(1 << 5)
#define MASK_MCRCSTO		(1 << 4)
#define MASK_MWDATTO		(1 << 3)
#define MASK_MRDATTO		(1 << 2)
#define MASK_MRBSYTO		(1 << 1)
#define MASK_MRSPTO		(1 << 0)

#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
				 MASK_MCRCSTO | MASK_MWDATTO | \
				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)

#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\
				 MASK_MBUFREN | MASK_MBUFWEN |			\
				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\
				 MASK_MCMD12RBE | MASK_MCMD12CRE)

/* CE_HOST_STS1 */
#define STS1_CMDSEQ		(1 << 31)

/* CE_HOST_STS2 */
#define STS2_CRCSTE		(1 << 31)
#define STS2_CRC16E		(1 << 30)
#define STS2_AC12CRCE		(1 << 29)
#define STS2_RSPCRC7E		(1 << 28)
#define STS2_CRCSTEBE		(1 << 27)
#define STS2_RDATEBE		(1 << 26)
#define STS2_AC12REBE		(1 << 25)
#define STS2_RSPEBE		(1 << 24)
#define STS2_AC12IDXE		(1 << 23)
#define STS2_RSPIDXE		(1 << 22)
#define STS2_CCSTO		(1 << 15)
#define STS2_RDATTO		(1 << 14)
#define STS2_DATBSYTO		(1 << 13)
#define STS2_CRCSTTO		(1 << 12)
#define STS2_AC12BSYTO		(1 << 11)
#define STS2_RSPBSYTO		(1 << 10)
#define STS2_AC12RSPTO		(1 << 9)
#define STS2_RSPTO		(1 << 8)
#define STS2_CRC_ERR		(STS2_CRCSTE | STS2_CRC16E |		\
				 STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
#define STS2_TIMEOUT_ERR	(STS2_CCSTO | STS2_RDATTO |		\
				 STS2_DATBSYTO | STS2_CRCSTTO |		\
				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\
				 STS2_AC12RSPTO | STS2_RSPTO)

#define CLKDEV_EMMC_DATA	52000000 /* 52 MHz */
#define CLKDEV_MMC_DATA		20000000 /* 20 MHz */
#define CLKDEV_INIT		400000   /* 400 kHz */

enum sh_mmcif_state {
	STATE_IDLE,
	STATE_REQUEST,
	STATE_IOS,
	STATE_TIMEOUT,
};

enum sh_mmcif_wait_for {
	MMCIF_WAIT_FOR_REQUEST,
	MMCIF_WAIT_FOR_CMD,
	MMCIF_WAIT_FOR_MREAD,
	MMCIF_WAIT_FOR_MWRITE,
	MMCIF_WAIT_FOR_READ,
	MMCIF_WAIT_FOR_WRITE,
	MMCIF_WAIT_FOR_READ_END,
	MMCIF_WAIT_FOR_WRITE_END,
	MMCIF_WAIT_FOR_STOP,
};

/*
 * difference for each SoC
 */
struct sh_mmcif_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct platform_device *pd;
	struct clk *clk;
	int bus_width;
	unsigned char timing;
	bool sd_error;
	bool dying;
	long timeout;
	void __iomem *addr;
	u32 *pio_ptr;
	spinlock_t lock;		/* protect sh_mmcif_host::state */
	enum sh_mmcif_state state;
	enum sh_mmcif_wait_for wait_for;
	struct delayed_work timeout_work;
	size_t blocksize;
	int sg_idx;
	int sg_blkidx;
	bool power;
	bool ccs_enable;		/* Command Completion Signal support */
	bool clk_ctrl2_enable;
	struct mutex thread_lock;
	u32 clkdiv_map;         /* see CE_CLK_CTRL::CLKDIV */

	/* DMA support */
	struct dma_chan		*chan_rx;
	struct dma_chan		*chan_tx;
	struct completion	dma_complete;
	bool			dma_active;
};

static const struct of_device_id sh_mmcif_of_match[] = {
	{ .compatible = "renesas,sh-mmcif" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_mmcif_of_match);

#define sh_mmcif_host_to_dev(host) (&host->pd->dev)

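/*
 * Read-modify-write register helpers: sh_mmcif_bitset() ORs @val into a
 * register, sh_mmcif_bitclr() clears the bits set in @val. All register
 * access in this driver is 32-bit MMIO relative to host->addr.
 */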
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
					unsigned int reg, u32 val)
{
	writel(~val & readl(host->addr + reg), host->addr + reg);
}

static void sh_mmcif_dma_complete(void *arg)
{
	struct sh_mmcif_host *host = arg;
	struct mmc_request *mrq = host->mrq;
	struct device *dev = sh_mmcif_host_to_dev(host);

	dev_dbg(dev, "Command completed\n");

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
		 dev_name(dev)))
		return;

	complete(&host->dma_complete);
}

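/*
 * DMA start for a data transfer: map the scatterlist, prepare and submit a
 * slave descriptor, then enable the DMA handshake in CE_BUF_ACC. If any
 * step fails, both channels are released and the driver falls back to PIO
 * (the "!desc" error path below).
 */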
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_rx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_FROM_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_rx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Tx channel too */
		chan = host->chan_tx;
		if (chan) {
			host->chan_tx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
		desc, cookie, data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *chan = host->chan_tx;
	struct device *dev = sh_mmcif_host_to_dev(host);
	dma_cookie_t cookie = -EINVAL;
	int ret;

	ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
			 DMA_TO_DEVICE);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = sh_mmcif_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
		dma_async_issue_pending(chan);
	}
	dev_dbg(dev, "%s(): mapped %d -> %d, cookie %d\n",
		__func__, data->sg_len, ret, cookie);

	if (!desc) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = -EIO;
		host->chan_tx = NULL;
		host->dma_active = false;
		dma_release_channel(chan);
		/* Free the Rx channel too */
		chan = host->chan_rx;
		if (chan) {
			host->chan_rx = NULL;
			dma_release_channel(chan);
		}
		dev_warn(dev,
			 "DMA failed: %d, falling back to PIO\n", ret);
		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	}

	dev_dbg(dev, "%s(): desc %p, cookie %d\n", __func__,
		desc, cookie);
}

static struct dma_chan *
sh_mmcif_request_dma_pdata(struct sh_mmcif_host *host, uintptr_t slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	if (slave_id <= 0)
		return NULL;

	return dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
}

static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
				     struct dma_chan *chan,
				     enum dma_transfer_direction direction)
{
	struct resource *res;
	struct dma_slave_config cfg = { 0, };

	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
	cfg.direction = direction;

	if (direction == DMA_DEV_TO_MEM) {
		cfg.src_addr = res->start + MMCIF_CE_DATA;
		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	} else {
		cfg.dst_addr = res->start + MMCIF_CE_DATA;
		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	}

	return dmaengine_slave_config(chan, &cfg);
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	host->dma_active = false;

	/* We can only either use DMA for both Tx and Rx or not use it at all */
	if (IS_ENABLED(CONFIG_SUPERH) && dev->platform_data) {
		struct sh_mmcif_plat_data *pdata = dev->platform_data;

		host->chan_tx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_tx);
		host->chan_rx = sh_mmcif_request_dma_pdata(host,
							pdata->slave_id_rx);
	} else {
		host->chan_tx = dma_request_chan(dev, "tx");
		if (IS_ERR(host->chan_tx))
			host->chan_tx = NULL;
		host->chan_rx = dma_request_chan(dev, "rx");
		if (IS_ERR(host->chan_rx))
			host->chan_rx = NULL;
	}
	dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
		host->chan_rx);

	if (!host->chan_tx || !host->chan_rx ||
	    sh_mmcif_dma_slave_config(host, host->chan_tx, DMA_MEM_TO_DEV) ||
	    sh_mmcif_dma_slave_config(host, host->chan_rx, DMA_DEV_TO_MEM))
		goto error;

	return;

error:
	if (host->chan_tx)
		dma_release_channel(host->chan_tx);
	if (host->chan_rx)
		dma_release_channel(host->chan_rx);
	host->chan_tx = host->chan_rx = NULL;
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
	/* Descriptors are freed automatically */
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}

	host->dma_active = false;
}

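/*
 * Clock divider selection: with a clkdiv_map, every supported divisor is a
 * power of two, div = 1 << (i + 1), and the loop picks the divisor/parent
 * rate pair whose result lies closest to the requested clock. Illustrative
 * numbers (assumed, not from a datasheet): for clk = 26 MHz, i = 1 gives
 * div = 4; if clk_round_rate() returns 104 MHz, myclk = 26 MHz and
 * diff = 0, an exact match.
 */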
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct sh_mmcif_plat_data *p = dev->platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;
	unsigned int current_clk = clk_get_rate(host->clk);
	unsigned int clkdiv;

	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;

	if (host->clkdiv_map) {
		unsigned int freq, best_freq, myclk, div, diff_min, diff;
		int i;

		clkdiv = 0;
		diff_min = ~0;
		best_freq = 0;
		for (i = 31; i >= 0; i--) {
			if (!((1 << i) & host->clkdiv_map))
				continue;

			/*
			 * clk = parent_freq / div
			 * -> parent_freq = clk x div
			 */

			div = 1 << (i + 1);
			freq = clk_round_rate(host->clk, clk * div);
			myclk = freq / div;
			diff = (myclk > clk) ? myclk - clk : clk - myclk;

			if (diff <= diff_min) {
				best_freq = freq;
				clkdiv = i;
				diff_min = diff;
			}
		}

		dev_dbg(dev, "clk %u/%u (%u, 0x%x)\n",
			(best_freq / (1 << (clkdiv + 1))), clk,
			best_freq, clkdiv);

		clk_set_rate(host->clk, best_freq);
		clkdiv = clkdiv << 16;
	} else if (sup_pclk && clk == current_clk) {
		clkdiv = CLK_SUP_PCLK;
	} else {
		clkdiv = (fls(DIV_ROUND_UP(current_clk, clk) - 1) - 1) << 16;
	}

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & clkdiv);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
	u32 tmp;

	tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
	if (host->ccs_enable)
		tmp |= SCCSTO_29;
	if (host->clk_ctrl2_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);
	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);
	/* byte swap on */
	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

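/*
 * Error recovery: if a command sequence is still running (STS1_CMDSEQ),
 * force-break it with CMD_CTRL_BREAK and poll for up to 10000 x 1 ms
 * before resetting the interface. Otherwise classify the failure from
 * HOST_STS2: CRC and end/index errors map to -EIO, timeouts to -ETIMEDOUT.
 */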
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	u32 state1, state2;
	int ret, timeout;

	host->sd_error = false;

	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
	state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
	dev_dbg(dev, "ERR HOST_STS1 = %08x\n", state1);
	dev_dbg(dev, "ERR HOST_STS2 = %08x\n", state2);

	if (state1 & STS1_CMDSEQ) {
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
		for (timeout = 10000; timeout; timeout--) {
			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
			      & STS1_CMDSEQ))
				break;
			mdelay(1);
		}
		if (!timeout) {
			dev_err(dev,
				"Forced end of command sequence timeout err\n");
			return -EIO;
		}
		sh_mmcif_sync_reset(host);
		dev_dbg(dev, "Forced end of command sequence\n");
		return -EIO;
	}

	if (state2 & STS2_CRC_ERR) {
		dev_err(dev, " CRC error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	} else if (state2 & STS2_TIMEOUT_ERR) {
		dev_err(dev, " Timeout: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -ETIMEDOUT;
	} else {
		dev_dbg(dev, " End/Index error: state %u, wait %u\n",
			host->state, host->wait_for);
		ret = -EIO;
	}
	return ret;
}

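/*
 * Advance the PIO position by one block. host->sg_blkidx is the byte
 * offset within the current scatterlist entry; when it reaches sg->length
 * the driver steps to the next entry and points pio_ptr at its start.
 * Example (assumed sizes): blocksize = 512 and sg->length = 1024 mean
 * sg_blkidx wraps to 0 and sg_idx advances after every second block.
 * Returns true while scatterlist entries remain.
 */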
static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
	struct mmc_data *data = host->mrq->data;

	host->sg_blkidx += host->blocksize;

	/* data->sg->length must be a multiple of host->blocksize? */
	BUG_ON(host->sg_blkidx > data->sg->length);

	if (host->sg_blkidx == data->sg->length) {
		host->sg_blkidx = 0;
		if (++host->sg_idx < data->sg_len)
			host->pio_ptr = sg_virt(++data->sg);
	} else {
		host->pio_ptr = p;
	}

	return host->sg_idx != data->sg_len;
}

static void sh_mmcif_single_read(struct sh_mmcif_host *host,
				 struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_READ;

	/* buf read enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	/* buffer read end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
	host->wait_for = MMCIF_WAIT_FOR_READ_END;

	return true;
}

static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MREAD;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
}

static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);

	return true;
}

static void sh_mmcif_single_write(struct sh_mmcif_host *host,
					struct mmc_request *mrq)
{
	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
			   BLOCK_SIZE_MASK) + 3;

	host->wait_for = MMCIF_WAIT_FOR_WRITE;

	/* buf write enable */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = sg_virt(data->sg);
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	/* buffer write end */
	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;

	return true;
}

static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
				struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data->sg_len || !data->sg->length)
		return;

	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
		BLOCK_SIZE_MASK;

	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
	host->sg_idx = 0;
	host->sg_blkidx = 0;
	host->pio_ptr = sg_virt(data->sg);

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
}

static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = host->mrq->data;
	u32 *p = host->pio_ptr;
	int i;

	if (host->sd_error) {
		data->error = sh_mmcif_error_manage(host);
		dev_dbg(dev, "%s(): %d\n", __func__, data->error);
		return false;
	}

	BUG_ON(!data->sg->length);

	for (i = 0; i < host->blocksize / 4; i++)
		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

	if (!sh_mmcif_next_block(host, p))
		return false;

	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

	return true;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
		cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
		cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
		cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
	} else
		cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
						struct mmc_command *cmd)
{
	cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

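/*
 * Assemble the CE_CMD_SET word: the command index lands in bits [29:24]
 * via the final "(opc << 24) | tmp", while tmp accumulates the response
 * type, busy handling, data direction/width, multi-block and index/CRC
 * check flags derived from the mmc_command and mmc_data descriptors.
 */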
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
			    struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);
	struct mmc_data *data = mrq->data;
	struct mmc_command *cmd = mrq->cmd;
	u32 opc = cmd->opcode;
	u32 tmp = 0;

	/* Response Type check */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		tmp |= CMD_SET_RTYP_NO;
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R3:
		tmp |= CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R1B:
		tmp |= CMD_SET_RBSY | CMD_SET_RTYP_6B;
		break;
	case MMC_RSP_R2:
		tmp |= CMD_SET_RTYP_17B;
		break;
	default:
		dev_err(dev, "Unsupported response type.\n");
		break;
	}

	/* WDAT / DATW */
	if (data) {
		tmp |= CMD_SET_WDAT;
		switch (host->bus_width) {
		case MMC_BUS_WIDTH_1:
			tmp |= CMD_SET_DATW_1;
			break;
		case MMC_BUS_WIDTH_4:
			tmp |= CMD_SET_DATW_4;
			break;
		case MMC_BUS_WIDTH_8:
			tmp |= CMD_SET_DATW_8;
			break;
		default:
			dev_err(dev, "Unsupported bus width.\n");
			break;
		}
		switch (host->timing) {
		case MMC_TIMING_MMC_DDR52:
			/*
			 * MMC core will only set this timing if the host
			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR
			 * capability. MMCIF implementations with this
			 * capability, e.g. sh73a0, will have to set it
			 * in their platform data.
			 */
			tmp |= CMD_SET_DARS;
			break;
		}
	}
	/* DWEN */
	if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
		tmp |= CMD_SET_DWEN;
	/* CMLTE/CMD12EN */
	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
				data->blocks << 16);
	}
	/* RIDXC[1:0] check bits */
	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
	    opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_RIDXC_BITS;
	/* RCRC7C[1:0] check bits */
	if (opc == MMC_SEND_OP_COND)
		tmp |= CMD_SET_CRC7C_BITS;
	/* RCRC7C[1:0] internal CRC7 */
	if (opc == MMC_ALL_SEND_CID ||
		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
		tmp |= CMD_SET_CRC7C_INTERNAL;

	return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
			       struct mmc_request *mrq, u32 opc)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (opc) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_multi_read(host, mrq);
		return 0;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_multi_write(host, mrq);
		return 0;
	case MMC_WRITE_BLOCK:
		sh_mmcif_single_write(host, mrq);
		return 0;
	case MMC_READ_SINGLE_BLOCK:
	case MMC_SEND_EXT_CSD:
		sh_mmcif_single_read(host, mrq);
		return 0;
	default:
		dev_err(dev, "Unsupported CMD%d\n", opc);
		return -EINVAL;
	}
}

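/*
 * Command-stage top half: program the transfer block size, prime the
 * interrupt status and mask registers (MASK_START_CMD plus MCRSPE, or
 * MRBSYE for busy-signalling responses), write the argument, and finally
 * write CE_CMD_SET, which starts the command. The timeout work is armed
 * under host->lock so the bottom half cannot race with it.
 */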
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
			       struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	u32 opc;
	u32 mask = 0;
	unsigned long flags;

	if (cmd->flags & MMC_RSP_BUSY)
		mask = MASK_START_CMD | MASK_MRBSYE;
	else
		mask = MASK_START_CMD | MASK_MCRSPE;

	if (host->ccs_enable)
		mask |= MASK_MCCSTO;

	if (mrq->data) {
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
				mrq->data->blksz);
	}
	opc = sh_mmcif_set_cmd(host, mrq);

	if (host->ccs_enable)
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
	else
		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);
	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
	/* set arg */
	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
	/* set cmd */
	spin_lock_irqsave(&host->lock, flags);
	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

	host->wait_for = MMCIF_WAIT_FOR_CMD;
	schedule_delayed_work(&host->timeout_work, host->timeout);
	spin_unlock_irqrestore(&host->lock, flags);
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
			      struct mmc_request *mrq)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	switch (mrq->cmd->opcode) {
	case MMC_READ_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
		break;
	case MMC_WRITE_MULTIPLE_BLOCK:
		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
		break;
	default:
		dev_err(dev, "unsupported stop cmd\n");
		mrq->stop->error = sh_mmcif_error_manage(host);
		return;
	}

	host->wait_for = MMCIF_WAIT_FOR_STOP;
}

static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		mrq->cmd->error = -EAGAIN;
		mmc_request_done(mmc, mrq);
		return;
	}

	host->state = STATE_REQUEST;
	spin_unlock_irqrestore(&host->lock, flags);

	host->mrq = mrq;

	sh_mmcif_start_cmd(host, mrq);
}

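/*
 * Frequency bounds with clkdiv_map = 0x3ff: dividers span 2^1..2^10, so
 * f_max becomes parent/2 (ffs(0x3ff) = 1) and f_min parent/1024
 * (fls(0x3ff) = 10). Without a platform-provided f_max the defaults are
 * clk/2 and clk/512.
 */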
static void sh_mmcif_clk_setup(struct sh_mmcif_host *host)
{
	struct device *dev = sh_mmcif_host_to_dev(host);

	if (host->mmc->f_max) {
		unsigned int f_max, f_min = 0, f_min_old;

		f_max = host->mmc->f_max;
		for (f_min_old = f_max; f_min_old > 2;) {
			f_min = clk_round_rate(host->clk, f_min_old / 2);
			if (f_min == f_min_old)
				break;
			f_min_old = f_min;
		}

		/*
		 * This driver assumes this SoC is R-Car Gen2 or later
		 */
		host->clkdiv_map = 0x3ff;

		host->mmc->f_max = f_max / (1 << ffs(host->clkdiv_map));
		host->mmc->f_min = f_min / (1 << fls(host->clkdiv_map));
	} else {
		unsigned int clk = clk_get_rate(host->clk);

		host->mmc->f_max = clk / 2;
		host->mmc->f_min = clk / 512;
	}

	dev_dbg(dev, "clk max/min = %d/%d\n",
		host->mmc->f_max, host->mmc->f_min);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sh_mmcif_host *host = mmc_priv(mmc);
	struct device *dev = sh_mmcif_host_to_dev(host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (host->state != STATE_IDLE) {
		dev_dbg(dev, "%s() rejected, state %u\n",
			__func__, host->state);
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	host->state = STATE_IOS;
	spin_unlock_irqrestore(&host->lock, flags);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		if (!host->power) {
			clk_prepare_enable(host->clk);
			pm_runtime_get_sync(dev);
			sh_mmcif_sync_reset(host);
			sh_mmcif_request_dma(host);
			host->power = true;
		}
		break;
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		if (host->power) {
			sh_mmcif_clock_control(host, 0);
			sh_mmcif_release_dma(host);
			pm_runtime_put(dev);
			clk_disable_unprepare(host->clk);
			host->power = false;
		}
		break;
	case MMC_POWER_ON:
		sh_mmcif_clock_control(host, ios->clock);
		break;
	}

	host->timing = ios->timing;
	host->bus_width = ios->bus_width;
	host->state = STATE_IDLE;
}

static const struct mmc_host_ops sh_mmcif_ops = {
	.request	= sh_mmcif_request,
	.set_ios	= sh_mmcif_set_ios,
	.get_cd		= mmc_gpio_get_cd,
};

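/*
 * Command-stage bottom half, running in the IRQ thread (it may sleep).
 * Reads the response, starts DMA or PIO for an optional data phase, and
 * for DMA waits on dma_complete with the same timeout the timeout work
 * uses. Returns true if the caller must keep waiting for more interrupts.
 */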
sh_mmcif_end_cmd(struct sh_mmcif_host * host)1081*4882a593Smuzhiyun static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	struct mmc_command *cmd = host->mrq->cmd;
1084*4882a593Smuzhiyun 	struct mmc_data *data = host->mrq->data;
1085*4882a593Smuzhiyun 	struct device *dev = sh_mmcif_host_to_dev(host);
1086*4882a593Smuzhiyun 	long time;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	if (host->sd_error) {
1089*4882a593Smuzhiyun 		switch (cmd->opcode) {
1090*4882a593Smuzhiyun 		case MMC_ALL_SEND_CID:
1091*4882a593Smuzhiyun 		case MMC_SELECT_CARD:
1092*4882a593Smuzhiyun 		case MMC_APP_CMD:
1093*4882a593Smuzhiyun 			cmd->error = -ETIMEDOUT;
1094*4882a593Smuzhiyun 			break;
1095*4882a593Smuzhiyun 		default:
1096*4882a593Smuzhiyun 			cmd->error = sh_mmcif_error_manage(host);
1097*4882a593Smuzhiyun 			break;
1098*4882a593Smuzhiyun 		}
1099*4882a593Smuzhiyun 		dev_dbg(dev, "CMD%d error %d\n",
1100*4882a593Smuzhiyun 			cmd->opcode, cmd->error);
1101*4882a593Smuzhiyun 		host->sd_error = false;
1102*4882a593Smuzhiyun 		return false;
1103*4882a593Smuzhiyun 	}
1104*4882a593Smuzhiyun 	if (!(cmd->flags & MMC_RSP_PRESENT)) {
1105*4882a593Smuzhiyun 		cmd->error = 0;
1106*4882a593Smuzhiyun 		return false;
1107*4882a593Smuzhiyun 	}
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	sh_mmcif_get_response(host, cmd);
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	if (!data)
1112*4882a593Smuzhiyun 		return false;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	/*
1115*4882a593Smuzhiyun 	 * The completion can be signalled both by the DMA callback and by the
1116*4882a593Smuzhiyun 	 * error path, so it has to be reinitialised here, before .dma_active is set
1117*4882a593Smuzhiyun 	 */
1118*4882a593Smuzhiyun 	init_completion(&host->dma_complete);
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	if (data->flags & MMC_DATA_READ) {
1121*4882a593Smuzhiyun 		if (host->chan_rx)
1122*4882a593Smuzhiyun 			sh_mmcif_start_dma_rx(host);
1123*4882a593Smuzhiyun 	} else {
1124*4882a593Smuzhiyun 		if (host->chan_tx)
1125*4882a593Smuzhiyun 			sh_mmcif_start_dma_tx(host);
1126*4882a593Smuzhiyun 	}
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	if (!host->dma_active) {
1129*4882a593Smuzhiyun 		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
1130*4882a593Smuzhiyun 		return !data->error;
1131*4882a593Smuzhiyun 	}
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	/* Running in the IRQ thread, can sleep */
1134*4882a593Smuzhiyun 	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
1135*4882a593Smuzhiyun 							 host->timeout);
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	if (data->flags & MMC_DATA_READ)
1138*4882a593Smuzhiyun 		dma_unmap_sg(host->chan_rx->device->dev,
1139*4882a593Smuzhiyun 			     data->sg, data->sg_len,
1140*4882a593Smuzhiyun 			     DMA_FROM_DEVICE);
1141*4882a593Smuzhiyun 	else
1142*4882a593Smuzhiyun 		dma_unmap_sg(host->chan_tx->device->dev,
1143*4882a593Smuzhiyun 			     data->sg, data->sg_len,
1144*4882a593Smuzhiyun 			     DMA_TO_DEVICE);
1145*4882a593Smuzhiyun 
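	/* time > 0: DMA completed, time == 0: timed out, time < 0: interrupted */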
1146*4882a593Smuzhiyun 	if (host->sd_error) {
1147*4882a593Smuzhiyun 		dev_err(host->mmc->parent,
1148*4882a593Smuzhiyun 			"Error IRQ while waiting for DMA completion!\n");
1149*4882a593Smuzhiyun 		/* Woken up by an error IRQ: abort DMA */
1150*4882a593Smuzhiyun 		data->error = sh_mmcif_error_manage(host);
1151*4882a593Smuzhiyun 	} else if (!time) {
1152*4882a593Smuzhiyun 		dev_err(host->mmc->parent, "DMA timeout!\n");
1153*4882a593Smuzhiyun 		data->error = -ETIMEDOUT;
1154*4882a593Smuzhiyun 	} else if (time < 0) {
1155*4882a593Smuzhiyun 		dev_err(host->mmc->parent,
1156*4882a593Smuzhiyun 			"wait_for_completion_...() error %ld!\n", time);
1157*4882a593Smuzhiyun 		data->error = time;
1158*4882a593Smuzhiyun 	}
1159*4882a593Smuzhiyun 	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
1160*4882a593Smuzhiyun 			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
1161*4882a593Smuzhiyun 	host->dma_active = false;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	if (data->error) {
1164*4882a593Smuzhiyun 		data->bytes_xfered = 0;
1165*4882a593Smuzhiyun 		/* Abort DMA */
1166*4882a593Smuzhiyun 		if (data->flags & MMC_DATA_READ)
1167*4882a593Smuzhiyun 			dmaengine_terminate_all(host->chan_rx);
1168*4882a593Smuzhiyun 		else
1169*4882a593Smuzhiyun 			dmaengine_terminate_all(host->chan_tx);
1170*4882a593Smuzhiyun 	}
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	return false;
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
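/*
 * Threaded interrupt handler: drives the request state machine. It
 * dispatches on the wait state sampled under the lock and either
 * reschedules the timeout work, if more processing is expected, or
 * completes the request.
 */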
1175*4882a593Smuzhiyun static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun 	struct sh_mmcif_host *host = dev_id;
1178*4882a593Smuzhiyun 	struct mmc_request *mrq;
1179*4882a593Smuzhiyun 	struct device *dev = sh_mmcif_host_to_dev(host);
1180*4882a593Smuzhiyun 	bool wait = false;
1181*4882a593Smuzhiyun 	unsigned long flags;
1182*4882a593Smuzhiyun 	int wait_work;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	spin_lock_irqsave(&host->lock, flags);
1185*4882a593Smuzhiyun 	wait_work = host->wait_for;
1186*4882a593Smuzhiyun 	spin_unlock_irqrestore(&host->lock, flags);
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	cancel_delayed_work_sync(&host->timeout_work);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	mutex_lock(&host->thread_lock);
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	mrq = host->mrq;
1193*4882a593Smuzhiyun 	if (!mrq) {
1194*4882a593Smuzhiyun 		dev_dbg(dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
1195*4882a593Smuzhiyun 			host->state, host->wait_for);
1196*4882a593Smuzhiyun 		mutex_unlock(&host->thread_lock);
1197*4882a593Smuzhiyun 		return IRQ_HANDLED;
1198*4882a593Smuzhiyun 	}
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	/*
1201*4882a593Smuzhiyun 	 * All handlers return true if processing continues, and false if the
1202*4882a593Smuzhiyun 	 * request has to be completed - successfully or not
1203*4882a593Smuzhiyun 	 */
1204*4882a593Smuzhiyun 	switch (wait_work) {
1205*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_REQUEST:
1206*4882a593Smuzhiyun 		/* We're too late, the timeout has already kicked in */
1207*4882a593Smuzhiyun 		mutex_unlock(&host->thread_lock);
1208*4882a593Smuzhiyun 		return IRQ_HANDLED;
1209*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_CMD:
1210*4882a593Smuzhiyun 		/* Wait for data? */
1211*4882a593Smuzhiyun 		wait = sh_mmcif_end_cmd(host);
1212*4882a593Smuzhiyun 		break;
1213*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_MREAD:
1214*4882a593Smuzhiyun 		/* Wait for more data? */
1215*4882a593Smuzhiyun 		wait = sh_mmcif_mread_block(host);
1216*4882a593Smuzhiyun 		break;
1217*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_READ:
1218*4882a593Smuzhiyun 		/* Wait for data end? */
1219*4882a593Smuzhiyun 		wait = sh_mmcif_read_block(host);
1220*4882a593Smuzhiyun 		break;
1221*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_MWRITE:
1222*4882a593Smuzhiyun 		/* Wait for more data to write? */
1223*4882a593Smuzhiyun 		wait = sh_mmcif_mwrite_block(host);
1224*4882a593Smuzhiyun 		break;
1225*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_WRITE:
1226*4882a593Smuzhiyun 		/* Wait for data end? */
1227*4882a593Smuzhiyun 		wait = sh_mmcif_write_block(host);
1228*4882a593Smuzhiyun 		break;
1229*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_STOP:
1230*4882a593Smuzhiyun 		if (host->sd_error) {
1231*4882a593Smuzhiyun 			mrq->stop->error = sh_mmcif_error_manage(host);
1232*4882a593Smuzhiyun 			dev_dbg(dev, "%s(): %d\n", __func__, mrq->stop->error);
1233*4882a593Smuzhiyun 			break;
1234*4882a593Smuzhiyun 		}
1235*4882a593Smuzhiyun 		sh_mmcif_get_cmd12response(host, mrq->stop);
1236*4882a593Smuzhiyun 		mrq->stop->error = 0;
1237*4882a593Smuzhiyun 		break;
1238*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_READ_END:
1239*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_WRITE_END:
1240*4882a593Smuzhiyun 		if (host->sd_error) {
1241*4882a593Smuzhiyun 			mrq->data->error = sh_mmcif_error_manage(host);
1242*4882a593Smuzhiyun 			dev_dbg(dev, "%s(): %d\n", __func__, mrq->data->error);
1243*4882a593Smuzhiyun 		}
1244*4882a593Smuzhiyun 		break;
1245*4882a593Smuzhiyun 	default:
1246*4882a593Smuzhiyun 		BUG();
1247*4882a593Smuzhiyun 	}
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	if (wait) {
1250*4882a593Smuzhiyun 		schedule_delayed_work(&host->timeout_work, host->timeout);
1251*4882a593Smuzhiyun 		/* Wait for more data */
1252*4882a593Smuzhiyun 		mutex_unlock(&host->thread_lock);
1253*4882a593Smuzhiyun 		return IRQ_HANDLED;
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
1257*4882a593Smuzhiyun 		struct mmc_data *data = mrq->data;
1258*4882a593Smuzhiyun 		if (!mrq->cmd->error && data && !data->error)
1259*4882a593Smuzhiyun 			data->bytes_xfered =
1260*4882a593Smuzhiyun 				data->blocks * data->blksz;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
1263*4882a593Smuzhiyun 			sh_mmcif_stop_cmd(host, mrq);
1264*4882a593Smuzhiyun 			if (!mrq->stop->error) {
1265*4882a593Smuzhiyun 				schedule_delayed_work(&host->timeout_work, host->timeout);
1266*4882a593Smuzhiyun 				mutex_unlock(&host->thread_lock);
1267*4882a593Smuzhiyun 				return IRQ_HANDLED;
1268*4882a593Smuzhiyun 			}
1269*4882a593Smuzhiyun 		}
1270*4882a593Smuzhiyun 	}
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
1273*4882a593Smuzhiyun 	host->state = STATE_IDLE;
1274*4882a593Smuzhiyun 	host->mrq = NULL;
1275*4882a593Smuzhiyun 	mmc_request_done(host->mmc, mrq);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	mutex_unlock(&host->thread_lock);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	return IRQ_HANDLED;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun 
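/*
 * Hard interrupt handler: acknowledge and mask the raised interrupt
 * sources, flag errors, then wake the IRQ thread for PIO processing or
 * complete the DMA wait on error.
 */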
1282*4882a593Smuzhiyun static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	struct sh_mmcif_host *host = dev_id;
1285*4882a593Smuzhiyun 	struct device *dev = sh_mmcif_host_to_dev(host);
1286*4882a593Smuzhiyun 	u32 state, mask;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
1289*4882a593Smuzhiyun 	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK);
1290*4882a593Smuzhiyun 	if (host->ccs_enable)
1291*4882a593Smuzhiyun 		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask));
1292*4882a593Smuzhiyun 	else
1293*4882a593Smuzhiyun 		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask));
1294*4882a593Smuzhiyun 	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN);
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	if (state & ~MASK_CLEAN)
1297*4882a593Smuzhiyun 		dev_dbg(dev, "IRQ state = 0x%08x incompletely cleared\n",
1298*4882a593Smuzhiyun 			state);
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	if (state & INT_ERR_STS || state & ~INT_ALL) {
1301*4882a593Smuzhiyun 		host->sd_error = true;
1302*4882a593Smuzhiyun 		dev_dbg(dev, "int err state = 0x%08x\n", state);
1303*4882a593Smuzhiyun 	}
1304*4882a593Smuzhiyun 	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
1305*4882a593Smuzhiyun 		if (!host->mrq)
1306*4882a593Smuzhiyun 			dev_dbg(dev, "NULL IRQ state = 0x%08x\n", state);
1307*4882a593Smuzhiyun 		if (!host->dma_active)
1308*4882a593Smuzhiyun 			return IRQ_WAKE_THREAD;
1309*4882a593Smuzhiyun 		else if (host->sd_error)
1310*4882a593Smuzhiyun 			sh_mmcif_dma_complete(host);
1311*4882a593Smuzhiyun 	} else {
1312*4882a593Smuzhiyun 		dev_dbg(dev, "Unexpected IRQ 0x%x\n", state);
1313*4882a593Smuzhiyun 	}
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	return IRQ_HANDLED;
1316*4882a593Smuzhiyun }
1317*4882a593Smuzhiyun 
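/*
 * Timeout work: runs when a stage has not completed in time. Fails the
 * current stage and completes the request, unless the host has already
 * returned to idle.
 */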
1318*4882a593Smuzhiyun static void sh_mmcif_timeout_work(struct work_struct *work)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun 	struct delayed_work *d = to_delayed_work(work);
1321*4882a593Smuzhiyun 	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
1322*4882a593Smuzhiyun 	struct mmc_request *mrq = host->mrq;
1323*4882a593Smuzhiyun 	struct device *dev = sh_mmcif_host_to_dev(host);
1324*4882a593Smuzhiyun 	unsigned long flags;
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	if (host->dying)
1327*4882a593Smuzhiyun 		/* Don't run after mmc_remove_host() */
1328*4882a593Smuzhiyun 		return;
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	spin_lock_irqsave(&host->lock, flags);
1331*4882a593Smuzhiyun 	if (host->state == STATE_IDLE) {
1332*4882a593Smuzhiyun 		spin_unlock_irqrestore(&host->lock, flags);
1333*4882a593Smuzhiyun 		return;
1334*4882a593Smuzhiyun 	}
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	dev_err(dev, "Timeout waiting for %u on CMD%u\n",
1337*4882a593Smuzhiyun 		host->wait_for, mrq->cmd->opcode);
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	host->state = STATE_TIMEOUT;
1340*4882a593Smuzhiyun 	spin_unlock_irqrestore(&host->lock, flags);
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	/*
1343*4882a593Smuzhiyun 	 * Handle races with cancel_delayed_work(), unless
1344*4882a593Smuzhiyun 	 * cancel_delayed_work_sync() is used
1345*4882a593Smuzhiyun 	 */
1346*4882a593Smuzhiyun 	switch (host->wait_for) {
1347*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_CMD:
1348*4882a593Smuzhiyun 		mrq->cmd->error = sh_mmcif_error_manage(host);
1349*4882a593Smuzhiyun 		break;
1350*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_STOP:
1351*4882a593Smuzhiyun 		mrq->stop->error = sh_mmcif_error_manage(host);
1352*4882a593Smuzhiyun 		break;
1353*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_MREAD:
1354*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_MWRITE:
1355*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_READ:
1356*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_WRITE:
1357*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_READ_END:
1358*4882a593Smuzhiyun 	case MMCIF_WAIT_FOR_WRITE_END:
1359*4882a593Smuzhiyun 		mrq->data->error = sh_mmcif_error_manage(host);
1360*4882a593Smuzhiyun 		break;
1361*4882a593Smuzhiyun 	default:
1362*4882a593Smuzhiyun 		BUG();
1363*4882a593Smuzhiyun 	}
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	host->state = STATE_IDLE;
1366*4882a593Smuzhiyun 	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
1367*4882a593Smuzhiyun 	host->mrq = NULL;
1368*4882a593Smuzhiyun 	mmc_request_done(host->mmc, mrq);
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun 
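/*
 * Derive the available OCR mask: a vmmc regulator takes precedence,
 * the platform-supplied mask is only used as a fallback.
 */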
1371*4882a593Smuzhiyun static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun 	struct device *dev = sh_mmcif_host_to_dev(host);
1374*4882a593Smuzhiyun 	struct sh_mmcif_plat_data *pd = dev->platform_data;
1375*4882a593Smuzhiyun 	struct mmc_host *mmc = host->mmc;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	mmc_regulator_get_supply(mmc);
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	if (!pd)
1380*4882a593Smuzhiyun 		return;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	if (!mmc->ocr_avail)
1383*4882a593Smuzhiyun 		mmc->ocr_avail = pd->ocr;
1384*4882a593Smuzhiyun 	else if (pd->ocr)
1385*4882a593Smuzhiyun 		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
1386*4882a593Smuzhiyun }
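
/*
 * A minimal sketch (hypothetical board code, not part of this file) of
 * the platform data consumed above; only the .caps and .ocr fields that
 * this driver actually reads are shown:
 *
 *	static struct sh_mmcif_plat_data example_mmcif_pd = {
 *		.caps	= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
 *		.ocr	= MMC_VDD_32_33 | MMC_VDD_33_34,
 *	};
 */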
1387*4882a593Smuzhiyun 
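/*
 * Probe: map the MMCIF registers, configure the mmc_host and its
 * transfer limits, set up the clock and runtime PM, and install the
 * threaded IRQ handler(s) before registering with the MMC core.
 */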
1388*4882a593Smuzhiyun static int sh_mmcif_probe(struct platform_device *pdev)
1389*4882a593Smuzhiyun {
1390*4882a593Smuzhiyun 	int ret = 0, irq[2];
1391*4882a593Smuzhiyun 	struct mmc_host *mmc;
1392*4882a593Smuzhiyun 	struct sh_mmcif_host *host;
1393*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
1394*4882a593Smuzhiyun 	struct sh_mmcif_plat_data *pd = dev->platform_data;
1395*4882a593Smuzhiyun 	void __iomem *reg;
1396*4882a593Smuzhiyun 	const char *name;
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	irq[0] = platform_get_irq(pdev, 0);
1399*4882a593Smuzhiyun 	irq[1] = platform_get_irq_optional(pdev, 1);
1400*4882a593Smuzhiyun 	if (irq[0] < 0)
1401*4882a593Smuzhiyun 		return -ENXIO;
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	reg = devm_platform_ioremap_resource(pdev, 0);
1404*4882a593Smuzhiyun 	if (IS_ERR(reg))
1405*4882a593Smuzhiyun 		return PTR_ERR(reg);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), dev);
1408*4882a593Smuzhiyun 	if (!mmc)
1409*4882a593Smuzhiyun 		return -ENOMEM;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	ret = mmc_of_parse(mmc);
1412*4882a593Smuzhiyun 	if (ret < 0)
1413*4882a593Smuzhiyun 		goto err_host;
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	host		= mmc_priv(mmc);
1416*4882a593Smuzhiyun 	host->mmc	= mmc;
1417*4882a593Smuzhiyun 	host->addr	= reg;
1418*4882a593Smuzhiyun 	host->timeout	= msecs_to_jiffies(10000);
1419*4882a593Smuzhiyun 	host->ccs_enable = true;
1420*4882a593Smuzhiyun 	host->clk_ctrl2_enable = false;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	host->pd = pdev;
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	spin_lock_init(&host->lock);
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	mmc->ops = &sh_mmcif_ops;
1427*4882a593Smuzhiyun 	sh_mmcif_init_ocr(host);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY;
1430*4882a593Smuzhiyun 	mmc->caps2 |= MMC_CAP2_NO_SD | MMC_CAP2_NO_SDIO;
1431*4882a593Smuzhiyun 	mmc->max_busy_timeout = 10000;
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	if (pd && pd->caps)
1434*4882a593Smuzhiyun 		mmc->caps |= pd->caps;
1435*4882a593Smuzhiyun 	mmc->max_segs = 32;
1436*4882a593Smuzhiyun 	mmc->max_blk_size = 512;
1437*4882a593Smuzhiyun 	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1438*4882a593Smuzhiyun 	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1439*4882a593Smuzhiyun 	mmc->max_seg_size = mmc->max_req_size;
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	platform_set_drvdata(pdev, host);
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	host->clk = devm_clk_get(dev, NULL);
1444*4882a593Smuzhiyun 	if (IS_ERR(host->clk)) {
1445*4882a593Smuzhiyun 		ret = PTR_ERR(host->clk);
1446*4882a593Smuzhiyun 		dev_err(dev, "cannot get clock: %d\n", ret);
1447*4882a593Smuzhiyun 		goto err_host;
1448*4882a593Smuzhiyun 	}
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	ret = clk_prepare_enable(host->clk);
1451*4882a593Smuzhiyun 	if (ret < 0)
1452*4882a593Smuzhiyun 		goto err_host;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	sh_mmcif_clk_setup(host);
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	pm_runtime_enable(dev);
1457*4882a593Smuzhiyun 	host->power = false;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(dev);
1460*4882a593Smuzhiyun 	if (ret < 0)
1461*4882a593Smuzhiyun 		goto err_clk;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&host->timeout_work, sh_mmcif_timeout_work);
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	sh_mmcif_sync_reset(host);
1466*4882a593Smuzhiyun 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	name = irq[1] < 0 ? dev_name(dev) : "sh_mmc:error";
1469*4882a593Smuzhiyun 	ret = devm_request_threaded_irq(dev, irq[0], sh_mmcif_intr,
1470*4882a593Smuzhiyun 					sh_mmcif_irqt, 0, name, host);
1471*4882a593Smuzhiyun 	if (ret) {
1472*4882a593Smuzhiyun 		dev_err(dev, "request_irq error (%s)\n", name);
1473*4882a593Smuzhiyun 		goto err_clk;
1474*4882a593Smuzhiyun 	}
1475*4882a593Smuzhiyun 	if (irq[1] >= 0) {
1476*4882a593Smuzhiyun 		ret = devm_request_threaded_irq(dev, irq[1],
1477*4882a593Smuzhiyun 						sh_mmcif_intr, sh_mmcif_irqt,
1478*4882a593Smuzhiyun 						0, "sh_mmc:int", host);
1479*4882a593Smuzhiyun 		if (ret) {
1480*4882a593Smuzhiyun 			dev_err(dev, "request_irq error (sh_mmc:int)\n");
1481*4882a593Smuzhiyun 			goto err_clk;
1482*4882a593Smuzhiyun 		}
1483*4882a593Smuzhiyun 	}
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	mutex_init(&host->thread_lock);
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	ret = mmc_add_host(mmc);
1488*4882a593Smuzhiyun 	if (ret < 0)
1489*4882a593Smuzhiyun 		goto err_clk;
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	dev_pm_qos_expose_latency_limit(dev, 100);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	dev_info(dev, "Chip version 0x%04x, clock rate %luMHz\n",
1494*4882a593Smuzhiyun 		 sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0xffff,
1495*4882a593Smuzhiyun 		 clk_get_rate(host->clk) / 1000000UL);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	pm_runtime_put(dev);
1498*4882a593Smuzhiyun 	clk_disable_unprepare(host->clk);
1499*4882a593Smuzhiyun 	return ret;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun err_clk:
1502*4882a593Smuzhiyun 	clk_disable_unprepare(host->clk);
1503*4882a593Smuzhiyun 	pm_runtime_put_sync(dev);
1504*4882a593Smuzhiyun 	pm_runtime_disable(dev);
1505*4882a593Smuzhiyun err_host:
1506*4882a593Smuzhiyun 	mmc_free_host(mmc);
1507*4882a593Smuzhiyun 	return ret;
1508*4882a593Smuzhiyun }
1509*4882a593Smuzhiyun 
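/*
 * Teardown: mark the host dying so the timeout work bails out, then
 * unregister from the MMC core and release resources in reverse probe
 * order.
 */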
1510*4882a593Smuzhiyun static int sh_mmcif_remove(struct platform_device *pdev)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	host->dying = true;
1515*4882a593Smuzhiyun 	clk_prepare_enable(host->clk);
1516*4882a593Smuzhiyun 	pm_runtime_get_sync(&pdev->dev);
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	dev_pm_qos_hide_latency_limit(&pdev->dev);
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	mmc_remove_host(host->mmc);
1521*4882a593Smuzhiyun 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	/*
1524*4882a593Smuzhiyun 	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
1525*4882a593Smuzhiyun 	 * mmc_remove_host() call above. But swapping order doesn't help either
1526*4882a593Smuzhiyun 	 * (a query on the linux-mmc mailing list didn't bring any replies).
1527*4882a593Smuzhiyun 	 */
1528*4882a593Smuzhiyun 	cancel_delayed_work_sync(&host->timeout_work);
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	clk_disable_unprepare(host->clk);
1531*4882a593Smuzhiyun 	mmc_free_host(host->mmc);
1532*4882a593Smuzhiyun 	pm_runtime_put_sync(&pdev->dev);
1533*4882a593Smuzhiyun 	pm_runtime_disable(&pdev->dev);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	return 0;
1536*4882a593Smuzhiyun }
1537*4882a593Smuzhiyun 
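/* For system sleep it is sufficient to mask the controller interrupts */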
1538*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1539*4882a593Smuzhiyun static int sh_mmcif_suspend(struct device *dev)
1540*4882a593Smuzhiyun {
1541*4882a593Smuzhiyun 	struct sh_mmcif_host *host = dev_get_drvdata(dev);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	pm_runtime_get_sync(dev);
1544*4882a593Smuzhiyun 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
1545*4882a593Smuzhiyun 	pm_runtime_put(dev);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	return 0;
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun static int sh_mmcif_resume(struct device *dev)
1551*4882a593Smuzhiyun {
1552*4882a593Smuzhiyun 	return 0;
1553*4882a593Smuzhiyun }
1554*4882a593Smuzhiyun #endif
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
1557*4882a593Smuzhiyun 	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume)
1558*4882a593Smuzhiyun };
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun static struct platform_driver sh_mmcif_driver = {
1561*4882a593Smuzhiyun 	.probe		= sh_mmcif_probe,
1562*4882a593Smuzhiyun 	.remove		= sh_mmcif_remove,
1563*4882a593Smuzhiyun 	.driver		= {
1564*4882a593Smuzhiyun 		.name	= DRIVER_NAME,
1565*4882a593Smuzhiyun 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1566*4882a593Smuzhiyun 		.pm	= &sh_mmcif_dev_pm_ops,
1567*4882a593Smuzhiyun 		.of_match_table = sh_mmcif_of_match,
1568*4882a593Smuzhiyun 	},
1569*4882a593Smuzhiyun };
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun module_platform_driver(sh_mmcif_driver);
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
1574*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1575*4882a593Smuzhiyun MODULE_ALIAS("platform:" DRIVER_NAME);
1576*4882a593Smuzhiyun MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");
1577