xref: /OK3568_Linux_fs/kernel/drivers/mmc/host/usdhi6rol0.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/sdio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/virtio.h>
#include <linux/workqueue.h>

#define USDHI6_SD_CMD		0x0000
#define USDHI6_SD_PORT_SEL	0x0004
#define USDHI6_SD_ARG		0x0008
#define USDHI6_SD_STOP		0x0010
#define USDHI6_SD_SECCNT	0x0014
#define USDHI6_SD_RSP10		0x0018
#define USDHI6_SD_RSP32		0x0020
#define USDHI6_SD_RSP54		0x0028
#define USDHI6_SD_RSP76		0x0030
#define USDHI6_SD_INFO1		0x0038
#define USDHI6_SD_INFO2		0x003c
#define USDHI6_SD_INFO1_MASK	0x0040
#define USDHI6_SD_INFO2_MASK	0x0044
#define USDHI6_SD_CLK_CTRL	0x0048
#define USDHI6_SD_SIZE		0x004c
#define USDHI6_SD_OPTION	0x0050
#define USDHI6_SD_ERR_STS1	0x0058
#define USDHI6_SD_ERR_STS2	0x005c
#define USDHI6_SD_BUF0		0x0060
#define USDHI6_SDIO_MODE	0x0068
#define USDHI6_SDIO_INFO1	0x006c
#define USDHI6_SDIO_INFO1_MASK	0x0070
#define USDHI6_CC_EXT_MODE	0x01b0
#define USDHI6_SOFT_RST		0x01c0
#define USDHI6_VERSION		0x01c4
#define USDHI6_HOST_MODE	0x01c8
#define USDHI6_SDIF_MODE	0x01cc

#define USDHI6_SD_CMD_APP		0x0040
#define USDHI6_SD_CMD_MODE_RSP_AUTO	0x0000
#define USDHI6_SD_CMD_MODE_RSP_NONE	0x0300
#define USDHI6_SD_CMD_MODE_RSP_R1	0x0400	/* Also R5, R6, R7 */
#define USDHI6_SD_CMD_MODE_RSP_R1B	0x0500	/* R1b */
#define USDHI6_SD_CMD_MODE_RSP_R2	0x0600
#define USDHI6_SD_CMD_MODE_RSP_R3	0x0700	/* Also R4 */
#define USDHI6_SD_CMD_DATA		0x0800
#define USDHI6_SD_CMD_READ		0x1000
#define USDHI6_SD_CMD_MULTI		0x2000
#define USDHI6_SD_CMD_CMD12_AUTO_OFF	0x4000

#define USDHI6_CC_EXT_MODE_SDRW		BIT(1)

#define USDHI6_SD_INFO1_RSP_END		BIT(0)
#define USDHI6_SD_INFO1_ACCESS_END	BIT(2)
#define USDHI6_SD_INFO1_CARD_OUT	BIT(3)
#define USDHI6_SD_INFO1_CARD_IN		BIT(4)
#define USDHI6_SD_INFO1_CD		BIT(5)
#define USDHI6_SD_INFO1_WP		BIT(7)
#define USDHI6_SD_INFO1_D3_CARD_OUT	BIT(8)
#define USDHI6_SD_INFO1_D3_CARD_IN	BIT(9)

#define USDHI6_SD_INFO2_CMD_ERR		BIT(0)
#define USDHI6_SD_INFO2_CRC_ERR		BIT(1)
#define USDHI6_SD_INFO2_END_ERR		BIT(2)
#define USDHI6_SD_INFO2_TOUT		BIT(3)
#define USDHI6_SD_INFO2_IWA_ERR		BIT(4)
#define USDHI6_SD_INFO2_IRA_ERR		BIT(5)
#define USDHI6_SD_INFO2_RSP_TOUT	BIT(6)
#define USDHI6_SD_INFO2_SDDAT0		BIT(7)
#define USDHI6_SD_INFO2_BRE		BIT(8)
#define USDHI6_SD_INFO2_BWE		BIT(9)
#define USDHI6_SD_INFO2_SCLKDIVEN	BIT(13)
#define USDHI6_SD_INFO2_CBSY		BIT(14)
#define USDHI6_SD_INFO2_ILA		BIT(15)

#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN)
#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT)
#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT)
#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT)

#define USDHI6_SD_INFO2_ERR	(USDHI6_SD_INFO2_CMD_ERR |	\
	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR |	\
	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |	\
	USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT |	\
	USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_INFO1_IRQ	(USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \
				 USDHI6_SD_INFO1_CARD)

#define USDHI6_SD_INFO2_IRQ	(USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \
				 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA)

#define USDHI6_SD_CLK_CTRL_SCLKEN	BIT(8)

#define USDHI6_SD_STOP_STP		BIT(0)
#define USDHI6_SD_STOP_SEC		BIT(8)

#define USDHI6_SDIO_INFO1_IOIRQ		BIT(0)
#define USDHI6_SDIO_INFO1_EXPUB52	BIT(14)
#define USDHI6_SDIO_INFO1_EXWT		BIT(15)

#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR	BIT(13)

#define USDHI6_SOFT_RST_RESERVED	(BIT(1) | BIT(2))
#define USDHI6_SOFT_RST_RESET		BIT(0)

#define USDHI6_SD_OPTION_TIMEOUT_SHIFT	4
#define USDHI6_SD_OPTION_TIMEOUT_MASK	(0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT)
#define USDHI6_SD_OPTION_WIDTH_1	BIT(15)

#define USDHI6_SD_PORT_SEL_PORTS_SHIFT	8

#define USDHI6_SD_CLK_CTRL_DIV_MASK	0xff

#define USDHI6_SDIO_INFO1_IRQ	(USDHI6_SDIO_INFO1_IOIRQ | 3 | \
				 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT)

#define USDHI6_MIN_DMA 64

#define USDHI6_REQ_TIMEOUT_MS 4000

enum usdhi6_wait_for {
	USDHI6_WAIT_FOR_REQUEST,
	USDHI6_WAIT_FOR_CMD,
	USDHI6_WAIT_FOR_MREAD,
	USDHI6_WAIT_FOR_MWRITE,
	USDHI6_WAIT_FOR_READ,
	USDHI6_WAIT_FOR_WRITE,
	USDHI6_WAIT_FOR_DATA_END,
	USDHI6_WAIT_FOR_STOP,
	USDHI6_WAIT_FOR_DMA,
};

struct usdhi6_page {
	struct page *page;
	void *mapped;		/* mapped page */
};

struct usdhi6_host {
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	void __iomem *base;
	struct clk *clk;

	/* SG memory handling */

	/* Common for multiple and single block requests */
	struct usdhi6_page pg;	/* current page from an SG */
	void *blk_page;		/* either a mapped page, or the bounce buffer */
	size_t offset;		/* offset within a page, including sg->offset */

	/* Blocks crossing a page boundary */
	size_t head_len;
	struct usdhi6_page head_pg;

	/* A bounce buffer for unaligned blocks or blocks crossing a page boundary */
	struct scatterlist bounce_sg;
	u8 bounce_buf[512];

	/* Multiple block requests only */
	struct scatterlist *sg;	/* current SG segment */
	int page_idx;		/* page index within an SG segment */

	enum usdhi6_wait_for wait;
	u32 status_mask;
	u32 status2_mask;
	u32 sdio_mask;
	u32 io_error;
	u32 irq_status;
	unsigned long imclk;
	unsigned long rate;
	bool app_cmd;

	/* Timeout handling */
	struct delayed_work timeout_work;
	unsigned long timeout;

	/* DMA support */
	struct dma_chan *chan_rx;
	struct dma_chan *chan_tx;
	bool dma_active;

	/* Pin control */
	struct pinctrl *pinctrl;
	struct pinctrl_state *pins_uhs;
};

/*			I/O primitives					*/

static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data)
{
	iowrite32(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
}

static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data)
{
	iowrite16(data, host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
}

static u32 usdhi6_read(struct usdhi6_host *host, u32 reg)
{
	u32 data = ioread32(host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
	return data;
}

static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg)
{
	u16 data = ioread16(host->base + reg);
	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__,
		host->base, reg, data);
	return data;
}

static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2)
{
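	/*
	 * A bit set in an *_INFO*_MASK register disables that interrupt
	 * source, so we store and program the complement of the requested
	 * sources.
	 */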
	host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1;
	host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2;
	usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask);
	usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask);
}

static void usdhi6_wait_for_resp(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END |
			  USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD,
			  USDHI6_SD_INFO2_ERR);
}

static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read)
{
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END |
			  USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR |
			  (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE));
}

static void usdhi6_only_cd(struct usdhi6_host *host)
{
	/* Mask all except card hotplug */
	usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0);
}

static void usdhi6_mask_all(struct usdhi6_host *host)
{
	usdhi6_irq_enable(host, 0, 0);
}

static int usdhi6_error_code(struct usdhi6_host *host)
{
	u32 err;

	usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP);

	if (host->io_error &
	    (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) {
		u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54);
		int opc = host->mrq ? host->mrq->cmd->opcode : -1;

		err = usdhi6_read(host, USDHI6_SD_ERR_STS2);
		/* Response timeout is often normal, don't spam the log */
		if (host->wait == USDHI6_WAIT_FOR_CMD)
			dev_dbg(mmc_dev(host->mmc),
				"T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
				err, rsp54, host->wait, opc);
		else
			dev_warn(mmc_dev(host->mmc),
				 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n",
				 err, rsp54, host->wait, opc);
		return -ETIMEDOUT;
	}

	err = usdhi6_read(host, USDHI6_SD_ERR_STS1);
	if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR)
		dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n",
			 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1);
	if (host->io_error & USDHI6_SD_INFO2_ILA)
		return -EILSEQ;

	return -EIO;
}

/*			Scatter-Gather management			*/

/*
 * In PIO mode we have to map each page separately, using kmap(). That way
 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we
 * have to use a bounce buffer for blocks crossing page boundaries. Such
 * blocks have been observed with an SDIO WiFi card (b43 driver).
 */
static void usdhi6_blk_bounce(struct usdhi6_host *host,
			      struct scatterlist *sg)
{
	struct mmc_data *data = host->mrq->data;
	size_t blk_head = host->head_len;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n",
		__func__, host->mrq->cmd->opcode, data->sg_len,
		data->blksz, data->blocks, sg->offset);

	host->head_pg.page	= host->pg.page;
	host->head_pg.mapped	= host->pg.mapped;
	host->pg.page		= nth_page(host->pg.page, 1);
	host->pg.mapped		= kmap(host->pg.page);

	host->blk_page = host->bounce_buf;
	host->offset = 0;

	if (data->flags & MMC_DATA_READ)
		return;

	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head,
	       blk_head);
	memcpy(host->bounce_buf + blk_head, host->pg.mapped,
	       data->blksz - blk_head);
}

/* Only called for multiple block IO */
static void usdhi6_sg_prep(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks);

	host->sg = data->sg;
	/* TODO: if we always map, this is redundant */
	host->offset = host->sg->offset;
}

/* Map the first page in an SG segment: common for multiple and single block IO */
static void *usdhi6_sg_map(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg;
	size_t head = PAGE_SIZE - sg->offset;
	size_t blk_head = head % data->blksz;
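	/*
	 * head: bytes from sg->offset to the end of the first page;
	 * blk_head: size of the leading fragment of a block that crosses
	 * that page boundary (0 if the blocks fit the page exactly).
	 */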

	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page);
	if (WARN(sg_dma_len(sg) % data->blksz,
		 "SG size %u isn't a multiple of block size %u\n",
		 sg_dma_len(sg), data->blksz))
		return NULL;

	host->pg.page = sg_page(sg);
	host->pg.mapped = kmap(host->pg.page);
	host->offset = sg->offset;

	/*
	 * Block size must be a power of 2 for multi-block transfers,
	 * therefore blk_head is equal for all pages in this SG
	 */
	host->head_len = blk_head;

	if (head < data->blksz)
		/*
		 * The first block in the SG crosses a page boundary.
		 * Max blksz = 512, so blocks can only span 2 pages
		 */
		usdhi6_blk_bounce(host, sg);
	else
		host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		sg->offset, host->mrq->cmd->opcode, host->mrq);

	return host->blk_page + host->offset;
}

/* Unmap the current page: common for multiple and single block IO */
static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force)
{
	struct mmc_data *data = host->mrq->data;
	struct page *page = host->head_pg.page;

	if (page) {
		/* The previous block crossed a page boundary */
		struct scatterlist *sg = data->sg_len > 1 ?
			host->sg : data->sg;
		size_t blk_head = host->head_len;

		if (!data->error && data->flags & MMC_DATA_READ) {
			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head,
			       host->bounce_buf, blk_head);
			memcpy(host->pg.mapped, host->bounce_buf + blk_head,
			       data->blksz - blk_head);
		}

		flush_dcache_page(page);
		kunmap(page);

		host->head_pg.page = NULL;

		if (!force && sg_dma_len(sg) + sg->offset >
		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head)
			/* More blocks in this SG, don't unmap the next page */
			return;
	}

	page = host->pg.page;
	if (!page)
		return;

	flush_dcache_page(page);
	kunmap(page);

	host->pg.page = NULL;
}

/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */
static void usdhi6_sg_advance(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;
	size_t done, total;

	/* New offset: set at the end of the previous block */
	if (host->head_pg.page) {
		/* Finished a cross-page block, jump to the new page */
		host->page_idx++;
		host->offset = data->blksz - host->head_len;
		host->blk_page = host->pg.mapped;
		usdhi6_sg_unmap(host, false);
	} else {
		host->offset += data->blksz;
		/* The completed block didn't cross a page boundary */
		if (host->offset == PAGE_SIZE) {
			/* If required, we'll map the page below */
			host->offset = 0;
			host->page_idx++;
		}
	}

	/*
	 * Now host->blk_page + host->offset points at the end of our last
	 * block, and host->page_idx is the index of the page in which our
	 * new block is located, if any
	 */

	done = (host->page_idx << PAGE_SHIFT) + host->offset;
	total = host->sg->offset + sg_dma_len(host->sg);

	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__,
		done, total, host->offset);

	if (done < total && host->offset) {
		/* More blocks in this page */
		if (host->offset + data->blksz > PAGE_SIZE)
			/* We have reached a block that spans 2 pages */
			usdhi6_blk_bounce(host, host->sg);

		return;
	}

	/* Finished current page or an SG segment */
	usdhi6_sg_unmap(host, false);

	if (done == total) {
		/*
		 * End of an SG segment or the complete SG: jump to the next
		 * segment, we'll map it later in usdhi6_blk_read() or
		 * usdhi6_blk_write()
		 */
		struct scatterlist *next = sg_next(host->sg);

		host->page_idx = 0;

		if (!next)
			host->wait = USDHI6_WAIT_FOR_DATA_END;
		host->sg = next;

		if (WARN(next && sg_dma_len(next) % data->blksz,
			 "SG size %u isn't a multiple of block size %u\n",
			 sg_dma_len(next), data->blksz))
			data->error = -EINVAL;

		return;
	}

	/* We cannot get here after crossing a page boundary */

	/* Next page in the same SG */
	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
	host->pg.mapped = kmap(host->pg.page);
	host->blk_page = host->pg.mapped;

	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n",
		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped,
		host->mrq->cmd->opcode, host->mrq);
}

/*			DMA handling					*/

static void usdhi6_dma_release(struct usdhi6_host *host)
{
	host->dma_active = false;
	if (host->chan_tx) {
		struct dma_chan *chan = host->chan_tx;
		host->chan_tx = NULL;
		dma_release_channel(chan);
	}
	if (host->chan_rx) {
		struct dma_chan *chan = host->chan_rx;
		host->chan_rx = NULL;
		dma_release_channel(chan);
	}
}

static void usdhi6_dma_stop_unmap(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	if (!host->dma_active)
		return;

	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
	host->dma_active = false;

	if (data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev, data->sg,
			     data->sg_len, DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev, data->sg,
			     data->sg_len, DMA_TO_DEVICE);
}

static void usdhi6_dma_complete(void *arg)
{
	struct usdhi6_host *host = arg;
	struct mmc_request *mrq = host->mrq;

	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n",
		 dev_name(mmc_dev(host->mmc)), mrq))
		return;

	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__,
		mrq->cmd->opcode);

	usdhi6_dma_stop_unmap(host);
	usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
}

static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan,
			    enum dma_transfer_direction dir)
{
	struct mmc_data *data = host->mrq->data;
	struct scatterlist *sg = data->sg;
	struct dma_async_tx_descriptor *desc = NULL;
	dma_cookie_t cookie = -EINVAL;
	enum dma_data_direction data_dir;
	int ret;

	switch (dir) {
	case DMA_MEM_TO_DEV:
		data_dir = DMA_TO_DEVICE;
		break;
	case DMA_DEV_TO_MEM:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		return -EINVAL;
	}

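	/*
	 * dma_map_sg() returns the number of hardware segments actually
	 * mapped (possibly fewer than sg_len after coalescing); that count,
	 * not sg_len, is what gets passed on to dmaengine_prep_slave_sg().
	 */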
	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir);
	if (ret > 0) {
		host->dma_active = true;
		desc = dmaengine_prep_slave_sg(chan, sg, ret, dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}

	if (desc) {
		desc->callback = usdhi6_dma_complete;
		desc->callback_param = host;
		cookie = dmaengine_submit(desc);
	}

	dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n",
		__func__, data->sg_len, ret, cookie, desc);

	if (cookie < 0) {
		/* DMA failed, fall back to PIO */
		if (ret >= 0)
			ret = cookie;
		usdhi6_dma_release(host);
		dev_warn(mmc_dev(host->mmc),
			 "DMA failed: %d, falling back to PIO\n", ret);
	}

	return cookie;
}

static int usdhi6_dma_start(struct usdhi6_host *host)
{
	if (!host->chan_rx || !host->chan_tx)
		return -ENODEV;

	if (host->mrq->data->flags & MMC_DATA_READ)
		return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM);

	return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV);
}

static void usdhi6_dma_kill(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n",
		__func__, data->sg_len, data->blocks, data->blksz);
	/* Abort DMA */
	if (data->flags & MMC_DATA_READ)
		dmaengine_terminate_all(host->chan_rx);
	else
		dmaengine_terminate_all(host->chan_tx);
}

static void usdhi6_dma_check_error(struct usdhi6_host *host)
{
	struct mmc_data *data = host->mrq->data;

	dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n",
		__func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1));

	if (host->io_error) {
		data->error = usdhi6_error_code(host);
		data->bytes_xfered = 0;
		usdhi6_dma_kill(host);
		usdhi6_dma_release(host);
		dev_warn(mmc_dev(host->mmc),
			 "DMA failed: %d, falling back to PIO\n", data->error);
		return;
	}

	/*
	 * The datasheet tells us to check a response from the card, whereas
	 * responses only come after the command phase, not after the data
	 * phase. Let's check anyway.
	 */
	if (host->irq_status & USDHI6_SD_INFO1_RSP_END)
		dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n");
}

static void usdhi6_dma_kick(struct usdhi6_host *host)
{
	if (host->mrq->data->flags & MMC_DATA_READ)
		dma_async_issue_pending(host->chan_rx);
	else
		dma_async_issue_pending(host->chan_tx);
}

static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start)
{
	struct dma_slave_config cfg = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	int ret;

	host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__,
		host->chan_tx);

	if (IS_ERR(host->chan_tx)) {
		host->chan_tx = NULL;
		return;
	}

	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = start + USDHI6_SD_BUF0;
	cfg.dst_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(host->chan_tx, &cfg);
	if (ret < 0)
		goto e_release_tx;

	host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__,
		host->chan_rx);

	if (IS_ERR(host->chan_rx)) {
		host->chan_rx = NULL;
		goto e_release_tx;
	}

	cfg.direction = DMA_DEV_TO_MEM;
	cfg.src_addr = cfg.dst_addr;
	cfg.src_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */
	cfg.dst_addr = 0;
	ret = dmaengine_slave_config(host->chan_rx, &cfg);
	if (ret < 0)
		goto e_release_rx;

	return;

e_release_rx:
	dma_release_channel(host->chan_rx);
	host->chan_rx = NULL;
e_release_tx:
	dma_release_channel(host->chan_tx);
	host->chan_tx = NULL;
}

/*			API helpers					*/

static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios)
{
	unsigned long rate = ios->clock;
	u32 val;
	unsigned int i;

	for (i = 1000; i; i--) {
		if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN)
			break;
		usleep_range(10, 100);
	}

	if (!i) {
		dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n");
		return;
	}

	val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK;

	if (rate) {
		unsigned long new_rate;

		if (host->imclk <= rate) {
			if (ios->timing != MMC_TIMING_UHS_DDR50) {
				/* Cannot have 1-to-1 clock in DDR mode */
				new_rate = host->imclk;
				val |= 0xff;
			} else {
				new_rate = host->imclk / 2;
			}
		} else {
			unsigned long div =
				roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate));
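			/*
			 * The divider field takes div / 4: 0x00 selects
			 * imclk / 2, 0x01 selects /4, 0x02 selects /8, ...,
			 * while 0xff selects the undivided input clock.
			 */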
			val |= div >> 2;
			new_rate = host->imclk / div;
		}

		if (host->rate == new_rate)
			return;

		host->rate = new_rate;

		dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n",
			rate, (val & 0xff) << 2, new_rate);
	}

	/*
	 * If the old or new rate is equal to the input rate, we have to
	 * switch the clock off before changing it and back on afterwards
	 */
	if (host->imclk == rate || host->imclk == host->rate || !rate)
		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
			     val & ~USDHI6_SD_CLK_CTRL_SCLKEN);

	if (!rate) {
		host->rate = 0;
		return;
	}

	usdhi6_write(host, USDHI6_SD_CLK_CTRL, val);

	if (host->imclk == rate || host->imclk == host->rate ||
	    !(val & USDHI6_SD_CLK_CTRL_SCLKEN))
		usdhi6_write(host, USDHI6_SD_CLK_CTRL,
			     val | USDHI6_SD_CLK_CTRL_SCLKEN);
}

static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vmmc))
		/* Errors ignored... */
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
				      ios->power_mode ? ios->vdd : 0);
}

static int usdhi6_reset(struct usdhi6_host *host)
{
	int i;

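	/*
	 * SOFT_RST.RESET appears to be active-low: writing 0 asserts the
	 * controller reset, writing 1 releases it; the reserved bits are
	 * kept set throughout.
	 */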
	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED);
	cpu_relax();
	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET);
	for (i = 1000; i; i--)
		if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET)
			break;

	return i ? 0 : -ETIMEDOUT;
}

static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct usdhi6_host *host = mmc_priv(mmc);
	u32 option, mode;
	int ret;

	dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n",
		ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		usdhi6_set_power(host, ios);
		usdhi6_only_cd(host);
		break;
	case MMC_POWER_UP:
		/*
		 * The only other place we touch USDHI6_SD_OPTION is
		 * .request(), which cannot race with MMC_POWER_UP
		 */
		ret = usdhi6_reset(host);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "Cannot reset the interface!\n");
		} else {
			usdhi6_set_power(host, ios);
			usdhi6_only_cd(host);
		}
		break;
	case MMC_POWER_ON:
		option = usdhi6_read(host, USDHI6_SD_OPTION);
		/*
		 * The eMMC standard only allows 4 or 8 bits in the DDR mode,
		 * the same probably holds for SD cards. We check here anyway,
		 * since the datasheet explicitly requires 4 bits for DDR.
		 */
		if (ios->bus_width == MMC_BUS_WIDTH_1) {
			if (ios->timing == MMC_TIMING_UHS_DDR50)
				dev_err(mmc_dev(mmc),
					"4 bits are required for DDR\n");
			option |= USDHI6_SD_OPTION_WIDTH_1;
			mode = 0;
		} else {
			option &= ~USDHI6_SD_OPTION_WIDTH_1;
			mode = ios->timing == MMC_TIMING_UHS_DDR50;
		}
		usdhi6_write(host, USDHI6_SD_OPTION, option);
		usdhi6_write(host, USDHI6_SDIF_MODE, mode);
		break;
	}

	if (host->rate != ios->clock)
		usdhi6_clk_set(host, ios);
}

/* This is data timeout. Response timeout is fixed to 640 clock cycles */
static void usdhi6_timeout_set(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	u32 val;
	unsigned long ticks;

	if (!mrq->data)
		ticks = host->rate / 1000 * mrq->cmd->busy_timeout;
	else
		ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) +
			mrq->data->timeout_clks;

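	/*
	 * The 4-bit timeout field selects a data timeout of 2^(13 + val)
	 * SDCLK cycles, i.e. from 2^13 (val = 0) up to 2^27 (val = 14).
	 */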
	if (!ticks || ticks > 1 << 27)
		/* Max timeout */
		val = 14;
	else if (ticks < 1 << 13)
		/* Min timeout */
		val = 0;
	else
		val = order_base_2(ticks) - 13;

	dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n",
		mrq->data ? "data" : "cmd", ticks, host->rate);

	/* Timeout Counter mask: 0xf0 */
	usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) |
		     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK));
}

static void usdhi6_request_done(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_data *data = mrq->data;

	if (WARN(host->pg.page || host->head_pg.page,
		 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n",
		 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode,
		 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-',
		 data ? host->offset : 0, data ? data->blocks : 0,
		 data ? data->blksz : 0, data ? data->sg_len : 0))
		usdhi6_sg_unmap(host, true);

	if (mrq->cmd->error ||
	    (data && data->error) ||
	    (mrq->stop && mrq->stop->error))
		dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n",
			__func__, mrq->cmd->opcode, data ? data->blocks : 0,
			data ? data->blksz : 0,
			mrq->cmd->error,
			data ? data->error : 1,
			mrq->stop ? mrq->stop->error : 1);

	/* Disable DMA */
	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0);
	host->wait = USDHI6_WAIT_FOR_REQUEST;
	host->mrq = NULL;

	mmc_request_done(host->mmc, mrq);
}

static int usdhi6_cmd_flags(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	u16 opc = cmd->opcode;

	if (host->app_cmd) {
		host->app_cmd = false;
		opc |= USDHI6_SD_CMD_APP;
	}

	if (mrq->data) {
		opc |= USDHI6_SD_CMD_DATA;

		if (mrq->data->flags & MMC_DATA_READ)
			opc |= USDHI6_SD_CMD_READ;

		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     mrq->data->blocks > 1)) {
			opc |= USDHI6_SD_CMD_MULTI;
			if (!mrq->stop)
				opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF;
		}

		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_NONE:
			opc |= USDHI6_SD_CMD_MODE_RSP_NONE;
			break;
		case MMC_RSP_R1:
			opc |= USDHI6_SD_CMD_MODE_RSP_R1;
			break;
		case MMC_RSP_R1B:
			opc |= USDHI6_SD_CMD_MODE_RSP_R1B;
			break;
		case MMC_RSP_R2:
			opc |= USDHI6_SD_CMD_MODE_RSP_R2;
			break;
		case MMC_RSP_R3:
			opc |= USDHI6_SD_CMD_MODE_RSP_R3;
			break;
		default:
			dev_warn(mmc_dev(host->mmc),
				 "Unknown response type %d\n",
				 mmc_resp_type(cmd));
			return -EINVAL;
		}
	}

	return opc;
}

static int usdhi6_rq_start(struct usdhi6_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	int opc = usdhi6_cmd_flags(host);
	int i;

	if (opc < 0)
		return opc;

	for (i = 1000; i; i--) {
		if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY))
			break;
		usleep_range(10, 100);
	}

	if (!i) {
		dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n");
		return -EAGAIN;
	}

	if (data) {
		bool use_dma;
		int ret = 0;

		host->page_idx = 0;

		if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) {
			switch (data->blksz) {
			case 512:
				break;
			case 32:
			case 64:
			case 128:
			case 256:
				if (mrq->stop)
					ret = -EINVAL;
				break;
			default:
				ret = -EINVAL;
			}
		} else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
			    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) &&
			   data->blksz != 512) {
			ret = -EINVAL;
		}

		if (ret < 0) {
			dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n",
				 __func__, data->blocks, data->blksz);
			return -EINVAL;
		}

		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
		    (cmd->opcode == SD_IO_RW_EXTENDED &&
		     data->blocks > 1))
			usdhi6_sg_prep(host);

		usdhi6_write(host, USDHI6_SD_SIZE, data->blksz);

		if ((data->blksz >= USDHI6_MIN_DMA ||
		     data->blocks > 1) &&
		    (data->blksz % 4 ||
		     data->sg->offset % 4))
			dev_dbg(mmc_dev(host->mmc),
				"Bad SG of %u: %ux%u @ %u\n", data->sg_len,
				data->blksz, data->blocks, data->sg->offset);

		/* Enable DMA for USDHI6_MIN_DMA bytes or more */
		use_dma = data->blksz >= USDHI6_MIN_DMA &&
			!(data->blksz % 4) &&
			usdhi6_dma_start(host) >= DMA_MIN_COOKIE;
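		/*
		 * usdhi6_dma_start() returns a dmaengine cookie; anything
		 * below DMA_MIN_COOKIE means descriptor setup failed and
		 * the transfer silently falls back to PIO.
		 */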
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 		if (use_dma)
1061*4882a593Smuzhiyun 			usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW);
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 		dev_dbg(mmc_dev(host->mmc),
1064*4882a593Smuzhiyun 			"%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n",
1065*4882a593Smuzhiyun 			__func__, cmd->opcode, data->blocks, data->blksz,
1066*4882a593Smuzhiyun 			data->sg_len, use_dma ? "DMA" : "PIO",
1067*4882a593Smuzhiyun 			data->flags & MMC_DATA_READ ? "read" : "write",
1068*4882a593Smuzhiyun 			data->sg->offset, mrq->stop ? " + stop" : "");
1069*4882a593Smuzhiyun 	} else {
1070*4882a593Smuzhiyun 		dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n",
1071*4882a593Smuzhiyun 			__func__, cmd->opcode);
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	/* We have to get a command completion interrupt with DMA too */
1075*4882a593Smuzhiyun 	usdhi6_wait_for_resp(host);
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	host->wait = USDHI6_WAIT_FOR_CMD;
1078*4882a593Smuzhiyun 	schedule_delayed_work(&host->timeout_work, host->timeout);
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	/* SEC bit is required to enable block counting by the core */
1081*4882a593Smuzhiyun 	usdhi6_write(host, USDHI6_SD_STOP,
1082*4882a593Smuzhiyun 		     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0);
1083*4882a593Smuzhiyun 	usdhi6_write(host, USDHI6_SD_ARG, cmd->arg);
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	/* Kick command execution */
1086*4882a593Smuzhiyun 	usdhi6_write(host, USDHI6_SD_CMD, opc);
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	return 0;
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun 
usdhi6_request(struct mmc_host * mmc,struct mmc_request * mrq)1091*4882a593Smuzhiyun static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun 	struct usdhi6_host *host = mmc_priv(mmc);
1094*4882a593Smuzhiyun 	int ret;
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	cancel_delayed_work_sync(&host->timeout_work);
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	host->mrq = mrq;
1099*4882a593Smuzhiyun 	host->sg = NULL;
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	usdhi6_timeout_set(host);
1102*4882a593Smuzhiyun 	ret = usdhi6_rq_start(host);
1103*4882a593Smuzhiyun 	if (ret < 0) {
1104*4882a593Smuzhiyun 		mrq->cmd->error = ret;
1105*4882a593Smuzhiyun 		usdhi6_request_done(host);
1106*4882a593Smuzhiyun 	}
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun 
usdhi6_get_cd(struct mmc_host * mmc)1109*4882a593Smuzhiyun static int usdhi6_get_cd(struct mmc_host *mmc)
1110*4882a593Smuzhiyun {
1111*4882a593Smuzhiyun 	struct usdhi6_host *host = mmc_priv(mmc);
1112*4882a593Smuzhiyun 	/* Read is atomic, no need to lock */
1113*4882a593Smuzhiyun 	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD;
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun /*
1116*4882a593Smuzhiyun  *	level	status.CD	CD_ACTIVE_HIGH	card present
1117*4882a593Smuzhiyun  *	1	0		0		0
1118*4882a593Smuzhiyun  *	1	0		1		1
1119*4882a593Smuzhiyun  *	0	1		0		1
1120*4882a593Smuzhiyun  *	0	1		1		0
1121*4882a593Smuzhiyun  */
1122*4882a593Smuzhiyun 	return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
1123*4882a593Smuzhiyun }
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun static int usdhi6_get_ro(struct mmc_host *mmc)
1126*4882a593Smuzhiyun {
1127*4882a593Smuzhiyun 	struct usdhi6_host *host = mmc_priv(mmc);
1128*4882a593Smuzhiyun 	/* No locking as above */
1129*4882a593Smuzhiyun 	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP;
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun /*
1132*4882a593Smuzhiyun  *	level	status.WP	RO_ACTIVE_HIGH	card read-only
1133*4882a593Smuzhiyun  *	1	0		0		0
1134*4882a593Smuzhiyun  *	1	0		1		1
1135*4882a593Smuzhiyun  *	0	1		0		1
1136*4882a593Smuzhiyun  *	0	1		1		0
1137*4882a593Smuzhiyun  */
1138*4882a593Smuzhiyun 	return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
1139*4882a593Smuzhiyun }
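/*
 * Worked example for the two tables above (illustration only): with an
 * active-low switch (the respective MMC_CAP2_*_ACTIVE_HIGH bit clear) and
 * the status bit set, !status == 0 and !(caps2 & ACTIVE_HIGH) == 1, so
 * 0 ^ 1 == 1: the card reads as present (or read-only, respectively).
 * Setting the ACTIVE_HIGH capability inverts the interpretation of the
 * same status bit with no extra register access.
 */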
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun 	struct usdhi6_host *host = mmc_priv(mmc);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis");
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	if (enable) {
1148*4882a593Smuzhiyun 		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ;
1149*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask);
1150*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SDIO_MODE, 1);
1151*4882a593Smuzhiyun 	} else {
1152*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SDIO_MODE, 0);
1153*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ);
1154*4882a593Smuzhiyun 		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ;
1155*4882a593Smuzhiyun 	}
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	if (IS_ERR(host->pins_uhs))
1161*4882a593Smuzhiyun 		return 0;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	switch (voltage) {
1164*4882a593Smuzhiyun 	case MMC_SIGNAL_VOLTAGE_180:
1165*4882a593Smuzhiyun 	case MMC_SIGNAL_VOLTAGE_120:
1166*4882a593Smuzhiyun 		return pinctrl_select_state(host->pinctrl,
1167*4882a593Smuzhiyun 					    host->pins_uhs);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	default:
1170*4882a593Smuzhiyun 		return pinctrl_select_default_state(mmc_dev(host->mmc));
1171*4882a593Smuzhiyun 	}
1172*4882a593Smuzhiyun }
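/*
 * A hypothetical device-tree fragment matching the "state_uhs" pinctrl
 * lookup performed in usdhi6_probe() (the pin group names below are
 * illustrative, not taken from a real board file):
 *
 *	pinctrl-names = "default", "state_uhs";
 *	pinctrl-0 = <&sdhi0_pins>;
 *	pinctrl-1 = <&sdhi0_pins_uhs>;
 *
 * The UHS state is selected for 1.8 V / 1.2 V signalling, the default
 * state for everything else, as implemented above.
 */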
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
1175*4882a593Smuzhiyun {
1176*4882a593Smuzhiyun 	int ret;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	ret = mmc_regulator_set_vqmmc(mmc, ios);
1179*4882a593Smuzhiyun 	if (ret < 0)
1180*4882a593Smuzhiyun 		return ret;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage);
1183*4882a593Smuzhiyun 	if (ret)
1184*4882a593Smuzhiyun 		dev_warn_once(mmc_dev(mmc),
1185*4882a593Smuzhiyun 			      "Failed to set pinstate err=%d\n", ret);
1186*4882a593Smuzhiyun 	return ret;
1187*4882a593Smuzhiyun }
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun static const struct mmc_host_ops usdhi6_ops = {
1190*4882a593Smuzhiyun 	.request	= usdhi6_request,
1191*4882a593Smuzhiyun 	.set_ios	= usdhi6_set_ios,
1192*4882a593Smuzhiyun 	.get_cd		= usdhi6_get_cd,
1193*4882a593Smuzhiyun 	.get_ro		= usdhi6_get_ro,
1194*4882a593Smuzhiyun 	.enable_sdio_irq = usdhi6_enable_sdio_irq,
1195*4882a593Smuzhiyun 	.start_signal_voltage_switch = usdhi6_sig_volt_switch,
1196*4882a593Smuzhiyun };
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun /*			State machine handlers				*/
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun static void usdhi6_resp_cmd12(struct usdhi6_host *host)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	struct mmc_command *cmd = host->mrq->stop;
1203*4882a593Smuzhiyun 	cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun static void usdhi6_resp_read(struct usdhi6_host *host)
1207*4882a593Smuzhiyun {
1208*4882a593Smuzhiyun 	struct mmc_command *cmd = host->mrq->cmd;
1209*4882a593Smuzhiyun 	u32 *rsp = cmd->resp, tmp = 0;
1210*4882a593Smuzhiyun 	int i;
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun /*
1213*4882a593Smuzhiyun  * RSP10	39-8
1214*4882a593Smuzhiyun  * RSP32	71-40
1215*4882a593Smuzhiyun  * RSP54	103-72
1216*4882a593Smuzhiyun  * RSP76	127-104
1217*4882a593Smuzhiyun  * R2-type response:
1218*4882a593Smuzhiyun  * resp[0]	= r[127..96]
1219*4882a593Smuzhiyun  * resp[1]	= r[95..64]
1220*4882a593Smuzhiyun  * resp[2]	= r[63..32]
1221*4882a593Smuzhiyun  * resp[3]	= r[31..0]
1222*4882a593Smuzhiyun  * Other responses:
1223*4882a593Smuzhiyun  * resp[0]	= r[39..8]
1224*4882a593Smuzhiyun  */
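/*
 * Worked example (illustration only): for a 136-bit R2 response the loop
 * below combines two adjacent register reads per word, e.g.
 *
 *	resp[0] = (RSP76 << 8) | (RSP54 >> 24)	== r[127..96]
 *	resp[3] =  RSP10 << 8			== r[39..8] << 8
 *
 * i.e. the registers hold r[127..8] (the CRC byte is not latched), so
 * the low byte of resp[3] is left as zero.
 */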
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
1227*4882a593Smuzhiyun 		return;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) {
1230*4882a593Smuzhiyun 		dev_err(mmc_dev(host->mmc),
1231*4882a593Smuzhiyun 			"CMD%d: response expected but missing!\n", cmd->opcode);
1232*4882a593Smuzhiyun 		return;
1233*4882a593Smuzhiyun 	}
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	if (mmc_resp_type(cmd) & MMC_RSP_136)
1236*4882a593Smuzhiyun 		for (i = 0; i < 4; i++) {
1237*4882a593Smuzhiyun 			if (i)
1238*4882a593Smuzhiyun 				rsp[3 - i] = tmp >> 24;
1239*4882a593Smuzhiyun 			tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8);
1240*4882a593Smuzhiyun 			rsp[3 - i] |= tmp << 8;
1241*4882a593Smuzhiyun 		}
1242*4882a593Smuzhiyun 	else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1243*4882a593Smuzhiyun 		 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
1244*4882a593Smuzhiyun 		/* Read RSP54 to avoid conflict with auto CMD12 */
1245*4882a593Smuzhiyun 		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54);
1246*4882a593Smuzhiyun 	else
1247*4882a593Smuzhiyun 		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]);
1250*4882a593Smuzhiyun }
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun static int usdhi6_blk_read(struct usdhi6_host *host)
1253*4882a593Smuzhiyun {
1254*4882a593Smuzhiyun 	struct mmc_data *data = host->mrq->data;
1255*4882a593Smuzhiyun 	u32 *p;
1256*4882a593Smuzhiyun 	int i, rest;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	if (host->io_error) {
1259*4882a593Smuzhiyun 		data->error = usdhi6_error_code(host);
1260*4882a593Smuzhiyun 		goto error;
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	if (host->pg.page) {
1264*4882a593Smuzhiyun 		p = host->blk_page + host->offset;
1265*4882a593Smuzhiyun 	} else {
1266*4882a593Smuzhiyun 		p = usdhi6_sg_map(host);
1267*4882a593Smuzhiyun 		if (!p) {
1268*4882a593Smuzhiyun 			data->error = -ENOMEM;
1269*4882a593Smuzhiyun 			goto error;
1270*4882a593Smuzhiyun 		}
1271*4882a593Smuzhiyun 	}
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	for (i = 0; i < data->blksz / 4; i++, p++)
1274*4882a593Smuzhiyun 		*p = usdhi6_read(host, USDHI6_SD_BUF0);
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	rest = data->blksz % 4;
1277*4882a593Smuzhiyun 	for (i = 0; i < (rest + 1) / 2; i++) {
1278*4882a593Smuzhiyun 		u16 d = usdhi6_read16(host, USDHI6_SD_BUF0);
1279*4882a593Smuzhiyun 		((u8 *)p)[2 * i] = ((u8 *)&d)[0];
1280*4882a593Smuzhiyun 		if (rest > 1 && !i)
1281*4882a593Smuzhiyun 			((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1];
1282*4882a593Smuzhiyun 	}
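	/*
	 * Example: for blksz = 6 the word loop above reads one 32-bit
	 * word and the tail loop does a single 16-bit read storing both
	 * bytes; for blksz = 5 only the low byte of that read is kept.
	 */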
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	return 0;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun error:
1287*4882a593Smuzhiyun 	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1288*4882a593Smuzhiyun 	host->wait = USDHI6_WAIT_FOR_REQUEST;
1289*4882a593Smuzhiyun 	return data->error;
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun static int usdhi6_blk_write(struct usdhi6_host *host)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	struct mmc_data *data = host->mrq->data;
1295*4882a593Smuzhiyun 	u32 *p;
1296*4882a593Smuzhiyun 	int i, rest;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	if (host->io_error) {
1299*4882a593Smuzhiyun 		data->error = usdhi6_error_code(host);
1300*4882a593Smuzhiyun 		goto error;
1301*4882a593Smuzhiyun 	}
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	if (host->pg.page) {
1304*4882a593Smuzhiyun 		p = host->blk_page + host->offset;
1305*4882a593Smuzhiyun 	} else {
1306*4882a593Smuzhiyun 		p = usdhi6_sg_map(host);
1307*4882a593Smuzhiyun 		if (!p) {
1308*4882a593Smuzhiyun 			data->error = -ENOMEM;
1309*4882a593Smuzhiyun 			goto error;
1310*4882a593Smuzhiyun 		}
1311*4882a593Smuzhiyun 	}
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	for (i = 0; i < data->blksz / 4; i++, p++)
1314*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SD_BUF0, *p);
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	rest = data->blksz % 4;
1317*4882a593Smuzhiyun 	for (i = 0; i < (rest + 1) / 2; i++) {
1318*4882a593Smuzhiyun 		u16 d;
1319*4882a593Smuzhiyun 		((u8 *)&d)[0] = ((u8 *)p)[2 * i];
1320*4882a593Smuzhiyun 		if (rest > 1 && !i)
1321*4882a593Smuzhiyun 			((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1];
1322*4882a593Smuzhiyun 		else
1323*4882a593Smuzhiyun 			((u8 *)&d)[1] = 0;
1324*4882a593Smuzhiyun 		usdhi6_write16(host, USDHI6_SD_BUF0, d);
1325*4882a593Smuzhiyun 	}
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	return 0;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun error:
1330*4882a593Smuzhiyun 	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error);
1331*4882a593Smuzhiyun 	host->wait = USDHI6_WAIT_FOR_REQUEST;
1332*4882a593Smuzhiyun 	return data->error;
1333*4882a593Smuzhiyun }
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun static int usdhi6_stop_cmd(struct usdhi6_host *host)
1336*4882a593Smuzhiyun {
1337*4882a593Smuzhiyun 	struct mmc_request *mrq = host->mrq;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	switch (mrq->cmd->opcode) {
1340*4882a593Smuzhiyun 	case MMC_READ_MULTIPLE_BLOCK:
1341*4882a593Smuzhiyun 	case MMC_WRITE_MULTIPLE_BLOCK:
1342*4882a593Smuzhiyun 		if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) {
1343*4882a593Smuzhiyun 			host->wait = USDHI6_WAIT_FOR_STOP;
1344*4882a593Smuzhiyun 			return 0;
1345*4882a593Smuzhiyun 		}
1346*4882a593Smuzhiyun 		fallthrough;	/* Unsupported STOP command */
1347*4882a593Smuzhiyun 	default:
1348*4882a593Smuzhiyun 		dev_err(mmc_dev(host->mmc),
1349*4882a593Smuzhiyun 			"unsupported stop CMD%d for CMD%d\n",
1350*4882a593Smuzhiyun 			mrq->stop->opcode, mrq->cmd->opcode);
1351*4882a593Smuzhiyun 		mrq->stop->error = -EOPNOTSUPP;
1352*4882a593Smuzhiyun 	}
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	return -EOPNOTSUPP;
1355*4882a593Smuzhiyun }
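/*
 * Illustration: a request with cmd = CMD18 (MMC_READ_MULTIPLE_BLOCK) and
 * stop = CMD12 (MMC_STOP_TRANSMISSION) arms USDHI6_WAIT_FOR_STOP above;
 * any other stop opcode is rejected with -EOPNOTSUPP.
 */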
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun static bool usdhi6_end_cmd(struct usdhi6_host *host)
1358*4882a593Smuzhiyun {
1359*4882a593Smuzhiyun 	struct mmc_request *mrq = host->mrq;
1360*4882a593Smuzhiyun 	struct mmc_command *cmd = mrq->cmd;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	if (host->io_error) {
1363*4882a593Smuzhiyun 		cmd->error = usdhi6_error_code(host);
1364*4882a593Smuzhiyun 		return false;
1365*4882a593Smuzhiyun 	}
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	usdhi6_resp_read(host);
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	if (!mrq->data)
1370*4882a593Smuzhiyun 		return false;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	if (host->dma_active) {
1373*4882a593Smuzhiyun 		usdhi6_dma_kick(host);
1374*4882a593Smuzhiyun 		if (!mrq->stop)
1375*4882a593Smuzhiyun 			host->wait = USDHI6_WAIT_FOR_DMA;
1376*4882a593Smuzhiyun 		else if (usdhi6_stop_cmd(host) < 0)
1377*4882a593Smuzhiyun 			return false;
1378*4882a593Smuzhiyun 	} else if (mrq->data->flags & MMC_DATA_READ) {
1379*4882a593Smuzhiyun 		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
1380*4882a593Smuzhiyun 		    (cmd->opcode == SD_IO_RW_EXTENDED &&
1381*4882a593Smuzhiyun 		     mrq->data->blocks > 1))
1382*4882a593Smuzhiyun 			host->wait = USDHI6_WAIT_FOR_MREAD;
1383*4882a593Smuzhiyun 		else
1384*4882a593Smuzhiyun 			host->wait = USDHI6_WAIT_FOR_READ;
1385*4882a593Smuzhiyun 	} else {
1386*4882a593Smuzhiyun 		if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1387*4882a593Smuzhiyun 		    (cmd->opcode == SD_IO_RW_EXTENDED &&
1388*4882a593Smuzhiyun 		     mrq->data->blocks > 1))
1389*4882a593Smuzhiyun 			host->wait = USDHI6_WAIT_FOR_MWRITE;
1390*4882a593Smuzhiyun 		else
1391*4882a593Smuzhiyun 			host->wait = USDHI6_WAIT_FOR_WRITE;
1392*4882a593Smuzhiyun 	}
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	return true;
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun static bool usdhi6_read_block(struct usdhi6_host *host)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	/* ACCESS_END IRQ is already unmasked */
1400*4882a593Smuzhiyun 	int ret = usdhi6_blk_read(host);
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	/*
1403*4882a593Smuzhiyun 	 * Have to force unmapping both pages: the single block could have been
1404*4882a593Smuzhiyun 	 * cross-page, in which case for single-block IO host->page_idx == 0.
1405*4882a593Smuzhiyun 	 * So, if we don't force, the second page won't be unmapped.
1406*4882a593Smuzhiyun 	 */
1407*4882a593Smuzhiyun 	usdhi6_sg_unmap(host, true);
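	/*
	 * Example (assuming 4 KiB pages): a single 512-byte block at
	 * sg->offset 3840 straddles a page boundary while host->page_idx
	 * stays 0, which is why the unmap above must be forced.
	 */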
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	if (ret < 0)
1410*4882a593Smuzhiyun 		return false;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	host->wait = USDHI6_WAIT_FOR_DATA_END;
1413*4882a593Smuzhiyun 	return true;
1414*4882a593Smuzhiyun }
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun static bool usdhi6_mread_block(struct usdhi6_host *host)
1417*4882a593Smuzhiyun {
1418*4882a593Smuzhiyun 	int ret = usdhi6_blk_read(host);
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	if (ret < 0)
1421*4882a593Smuzhiyun 		return false;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	usdhi6_sg_advance(host);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	return !host->mrq->data->error &&
1426*4882a593Smuzhiyun 		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1427*4882a593Smuzhiyun }
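/*
 * Note on the return value above (a reading of the state machine, not
 * authoritative): usdhi6_sg_advance() moves host->wait to
 * USDHI6_WAIT_FOR_DATA_END once the last block is consumed; returning
 * false in that case when a STOP command is pending lets the bottom half
 * issue CMD12 instead of waiting for more data.
 */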
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun static bool usdhi6_write_block(struct usdhi6_host *host)
1430*4882a593Smuzhiyun {
1431*4882a593Smuzhiyun 	int ret = usdhi6_blk_write(host);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	/* See comment in usdhi6_read_block() */
1434*4882a593Smuzhiyun 	usdhi6_sg_unmap(host, true);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	if (ret < 0)
1437*4882a593Smuzhiyun 		return false;
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	host->wait = USDHI6_WAIT_FOR_DATA_END;
1440*4882a593Smuzhiyun 	return true;
1441*4882a593Smuzhiyun }
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun static bool usdhi6_mwrite_block(struct usdhi6_host *host)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun 	int ret = usdhi6_blk_write(host);
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	if (ret < 0)
1448*4882a593Smuzhiyun 		return false;
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	usdhi6_sg_advance(host);
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	return !host->mrq->data->error &&
1453*4882a593Smuzhiyun 		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop);
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun /*			Interrupt & timeout handlers			*/
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id)
1459*4882a593Smuzhiyun {
1460*4882a593Smuzhiyun 	struct usdhi6_host *host = dev_id;
1461*4882a593Smuzhiyun 	struct mmc_request *mrq;
1462*4882a593Smuzhiyun 	struct mmc_command *cmd;
1463*4882a593Smuzhiyun 	struct mmc_data *data;
1464*4882a593Smuzhiyun 	bool io_wait = false;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	cancel_delayed_work_sync(&host->timeout_work);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	mrq = host->mrq;
1469*4882a593Smuzhiyun 	if (!mrq)
1470*4882a593Smuzhiyun 		return IRQ_HANDLED;
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	cmd = mrq->cmd;
1473*4882a593Smuzhiyun 	data = mrq->data;
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	switch (host->wait) {
1476*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_REQUEST:
1477*4882a593Smuzhiyun 		/* We're too late, the timeout has already kicked in */
1478*4882a593Smuzhiyun 		return IRQ_HANDLED;
1479*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_CMD:
1480*4882a593Smuzhiyun 		/* Wait for data? */
1481*4882a593Smuzhiyun 		io_wait = usdhi6_end_cmd(host);
1482*4882a593Smuzhiyun 		break;
1483*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_MREAD:
1484*4882a593Smuzhiyun 		/* Wait for more data? */
1485*4882a593Smuzhiyun 		io_wait = usdhi6_mread_block(host);
1486*4882a593Smuzhiyun 		break;
1487*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_READ:
1488*4882a593Smuzhiyun 		/* Wait for data end? */
1489*4882a593Smuzhiyun 		io_wait = usdhi6_read_block(host);
1490*4882a593Smuzhiyun 		break;
1491*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_MWRITE:
1492*4882a593Smuzhiyun 		/* Wait for more data to write? */
1493*4882a593Smuzhiyun 		io_wait = usdhi6_mwrite_block(host);
1494*4882a593Smuzhiyun 		break;
1495*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_WRITE:
1496*4882a593Smuzhiyun 		/* Wait for data end? */
1497*4882a593Smuzhiyun 		io_wait = usdhi6_write_block(host);
1498*4882a593Smuzhiyun 		break;
1499*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_DMA:
1500*4882a593Smuzhiyun 		usdhi6_dma_check_error(host);
1501*4882a593Smuzhiyun 		break;
1502*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_STOP:
1503*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SD_STOP, 0);
1504*4882a593Smuzhiyun 		if (host->io_error) {
1505*4882a593Smuzhiyun 			int ret = usdhi6_error_code(host);
1506*4882a593Smuzhiyun 			if (mrq->stop)
1507*4882a593Smuzhiyun 				mrq->stop->error = ret;
1508*4882a593Smuzhiyun 			else
1509*4882a593Smuzhiyun 				mrq->data->error = ret;
1510*4882a593Smuzhiyun 			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret);
1511*4882a593Smuzhiyun 			break;
1512*4882a593Smuzhiyun 		}
1513*4882a593Smuzhiyun 		usdhi6_resp_cmd12(host);
1514*4882a593Smuzhiyun 		mrq->stop->error = 0;
1515*4882a593Smuzhiyun 		break;
1516*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_DATA_END:
1517*4882a593Smuzhiyun 		if (host->io_error) {
1518*4882a593Smuzhiyun 			mrq->data->error = usdhi6_error_code(host);
1519*4882a593Smuzhiyun 			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__,
1520*4882a593Smuzhiyun 				 mrq->data->error);
1521*4882a593Smuzhiyun 		}
1522*4882a593Smuzhiyun 		break;
1523*4882a593Smuzhiyun 	default:
1524*4882a593Smuzhiyun 		cmd->error = -EFAULT;
1525*4882a593Smuzhiyun 		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1526*4882a593Smuzhiyun 		usdhi6_request_done(host);
1527*4882a593Smuzhiyun 		return IRQ_HANDLED;
1528*4882a593Smuzhiyun 	}
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	if (io_wait) {
1531*4882a593Smuzhiyun 		schedule_delayed_work(&host->timeout_work, host->timeout);
1532*4882a593Smuzhiyun 		/* Wait for more data or ACCESS_END */
1533*4882a593Smuzhiyun 		if (!host->dma_active)
1534*4882a593Smuzhiyun 			usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ);
1535*4882a593Smuzhiyun 		return IRQ_HANDLED;
1536*4882a593Smuzhiyun 	}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	if (!cmd->error) {
1539*4882a593Smuzhiyun 		if (data) {
1540*4882a593Smuzhiyun 			if (!data->error) {
1541*4882a593Smuzhiyun 				if (host->wait != USDHI6_WAIT_FOR_STOP &&
1542*4882a593Smuzhiyun 				    host->mrq->stop &&
1543*4882a593Smuzhiyun 				    !host->mrq->stop->error &&
1544*4882a593Smuzhiyun 				    !usdhi6_stop_cmd(host)) {
1545*4882a593Smuzhiyun 					/* Sending STOP */
1546*4882a593Smuzhiyun 					usdhi6_wait_for_resp(host);
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 					schedule_delayed_work(&host->timeout_work,
1549*4882a593Smuzhiyun 							      host->timeout);
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 					return IRQ_HANDLED;
1552*4882a593Smuzhiyun 				}
1553*4882a593Smuzhiyun 
1554*4882a593Smuzhiyun 				data->bytes_xfered = data->blocks * data->blksz;
1555*4882a593Smuzhiyun 			} else {
1556*4882a593Smuzhiyun 				/* Data error: might need to unmap the last page */
1557*4882a593Smuzhiyun 				dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n",
1558*4882a593Smuzhiyun 					 __func__, data->error);
1559*4882a593Smuzhiyun 				usdhi6_sg_unmap(host, true);
1560*4882a593Smuzhiyun 			}
1561*4882a593Smuzhiyun 		} else if (cmd->opcode == MMC_APP_CMD) {
1562*4882a593Smuzhiyun 			host->app_cmd = true;
1563*4882a593Smuzhiyun 		}
1564*4882a593Smuzhiyun 	}
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	usdhi6_request_done(host);
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	return IRQ_HANDLED;
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun static irqreturn_t usdhi6_sd(int irq, void *dev_id)
1572*4882a593Smuzhiyun {
1573*4882a593Smuzhiyun 	struct usdhi6_host *host = dev_id;
1574*4882a593Smuzhiyun 	u16 status, status2, error;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1577*4882a593Smuzhiyun 		~USDHI6_SD_INFO1_CARD;
1578*4882a593Smuzhiyun 	status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	usdhi6_only_cd(host);
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	dev_dbg(mmc_dev(host->mmc),
1583*4882a593Smuzhiyun 		"IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2);
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	if (!status && !status2)
1586*4882a593Smuzhiyun 		return IRQ_NONE;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	error = status2 & USDHI6_SD_INFO2_ERR;
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	/* Ack / clear interrupts */
1591*4882a593Smuzhiyun 	if (USDHI6_SD_INFO1_IRQ & status)
1592*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SD_INFO1,
1593*4882a593Smuzhiyun 			     0xffff & ~(USDHI6_SD_INFO1_IRQ & status));
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	if (USDHI6_SD_INFO2_IRQ & status2) {
1596*4882a593Smuzhiyun 		if (error)
1597*4882a593Smuzhiyun 			/* In error cases BWE and BRE aren't cleared automatically */
1598*4882a593Smuzhiyun 			status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 		usdhi6_write(host, USDHI6_SD_INFO2,
1601*4882a593Smuzhiyun 			     0xffff & ~(USDHI6_SD_INFO2_IRQ & status2));
1602*4882a593Smuzhiyun 	}
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	host->io_error = error;
1605*4882a593Smuzhiyun 	host->irq_status = status;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	if (error) {
1608*4882a593Smuzhiyun 		/* Don't pollute the log with unsupported command timeouts */
1609*4882a593Smuzhiyun 		if (host->wait != USDHI6_WAIT_FOR_CMD ||
1610*4882a593Smuzhiyun 		    error != USDHI6_SD_INFO2_RSP_TOUT)
1611*4882a593Smuzhiyun 			dev_warn(mmc_dev(host->mmc),
1612*4882a593Smuzhiyun 				 "%s(): INFO2 error bits 0x%08x\n",
1613*4882a593Smuzhiyun 				 __func__, error);
1614*4882a593Smuzhiyun 		else
1615*4882a593Smuzhiyun 			dev_dbg(mmc_dev(host->mmc),
1616*4882a593Smuzhiyun 				"%s(): INFO2 error bits 0x%08x\n",
1617*4882a593Smuzhiyun 				__func__, error);
1618*4882a593Smuzhiyun 	}
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	return IRQ_WAKE_THREAD;
1621*4882a593Smuzhiyun }
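/*
 * How the two halves fit together (a summary of this file's wiring, see
 * the devm_request_threaded_irq() call in usdhi6_probe()): usdhi6_sd()
 * runs in hard-IRQ context, acks the hardware and caches the status bits,
 * then returns IRQ_WAKE_THREAD, upon which usdhi6_sd_bh() runs the state
 * machine in sleepable thread context.
 */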
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun static irqreturn_t usdhi6_sdio(int irq, void *dev_id)
1624*4882a593Smuzhiyun {
1625*4882a593Smuzhiyun 	struct usdhi6_host *host = dev_id;
1626*4882a593Smuzhiyun 	u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status);
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	if (!status)
1631*4882a593Smuzhiyun 		return IRQ_NONE;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	usdhi6_write(host, USDHI6_SDIO_INFO1, ~status);
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	mmc_signal_sdio_irq(host->mmc);
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	return IRQ_HANDLED;
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun static irqreturn_t usdhi6_cd(int irq, void *dev_id)
1641*4882a593Smuzhiyun {
1642*4882a593Smuzhiyun 	struct usdhi6_host *host = dev_id;
1643*4882a593Smuzhiyun 	struct mmc_host *mmc = host->mmc;
1644*4882a593Smuzhiyun 	u16 status;
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 	/* We're only interested in hotplug events here */
1647*4882a593Smuzhiyun 	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask &
1648*4882a593Smuzhiyun 		USDHI6_SD_INFO1_CARD;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	if (!status)
1651*4882a593Smuzhiyun 		return IRQ_NONE;
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	/* Ack */
1654*4882a593Smuzhiyun 	usdhi6_write(host, USDHI6_SD_INFO1, ~status);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	if (!work_pending(&mmc->detect.work) &&
1657*4882a593Smuzhiyun 	    (((status & USDHI6_SD_INFO1_CARD_INSERT) &&
1658*4882a593Smuzhiyun 	      !mmc->card) ||
1659*4882a593Smuzhiyun 	     ((status & USDHI6_SD_INFO1_CARD_EJECT) &&
1660*4882a593Smuzhiyun 	      mmc->card)))
1661*4882a593Smuzhiyun 		mmc_detect_change(mmc, msecs_to_jiffies(100));
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	return IRQ_HANDLED;
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun /*
1667*4882a593Smuzhiyun  * Strictly speaking, this should not be needed, provided the built-in
1668*4882a593Smuzhiyun  * timeout works reliably in both PIO cases and DMA never fails. But if DMA
1669*4882a593Smuzhiyun  * does fail, a timeout handler might be the only way to catch the error.
1670*4882a593Smuzhiyun  */
1671*4882a593Smuzhiyun static void usdhi6_timeout_work(struct work_struct *work)
1672*4882a593Smuzhiyun {
1673*4882a593Smuzhiyun 	struct delayed_work *d = to_delayed_work(work);
1674*4882a593Smuzhiyun 	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
1675*4882a593Smuzhiyun 	struct mmc_request *mrq = host->mrq;
1676*4882a593Smuzhiyun 	struct mmc_data *data = mrq ? mrq->data : NULL;
1677*4882a593Smuzhiyun 	struct scatterlist *sg;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	dev_warn(mmc_dev(host->mmc),
1680*4882a593Smuzhiyun 		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
1681*4882a593Smuzhiyun 		 host->dma_active ? "DMA" : "PIO",
1682*4882a593Smuzhiyun 		 host->wait, mrq ? mrq->cmd->opcode : -1,
1683*4882a593Smuzhiyun 		 usdhi6_read(host, USDHI6_SD_INFO1),
1684*4882a593Smuzhiyun 		 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status);
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	if (host->dma_active) {
1687*4882a593Smuzhiyun 		usdhi6_dma_kill(host);
1688*4882a593Smuzhiyun 		usdhi6_dma_stop_unmap(host);
1689*4882a593Smuzhiyun 	}
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	switch (host->wait) {
1692*4882a593Smuzhiyun 	default:
1693*4882a593Smuzhiyun 		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
1694*4882a593Smuzhiyun 		fallthrough;	/* mrq could be NULL here, but that should be impossible */
1695*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_CMD:
1696*4882a593Smuzhiyun 		usdhi6_error_code(host);
1697*4882a593Smuzhiyun 		if (mrq)
1698*4882a593Smuzhiyun 			mrq->cmd->error = -ETIMEDOUT;
1699*4882a593Smuzhiyun 		break;
1700*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_STOP:
1701*4882a593Smuzhiyun 		usdhi6_error_code(host);
1702*4882a593Smuzhiyun 		mrq->stop->error = -ETIMEDOUT;
1703*4882a593Smuzhiyun 		break;
1704*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_DMA:
1705*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_MREAD:
1706*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_MWRITE:
1707*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_READ:
1708*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_WRITE:
1709*4882a593Smuzhiyun 		sg = host->sg ?: data->sg;
1710*4882a593Smuzhiyun 		dev_dbg(mmc_dev(host->mmc),
1711*4882a593Smuzhiyun 			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
1712*4882a593Smuzhiyun 			data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
1713*4882a593Smuzhiyun 			host->offset, data->blocks, data->blksz, data->sg_len,
1714*4882a593Smuzhiyun 			sg_dma_len(sg), sg->offset);
1715*4882a593Smuzhiyun 		usdhi6_sg_unmap(host, true);
1716*4882a593Smuzhiyun 		fallthrough;	/* page unmapped in USDHI6_WAIT_FOR_DATA_END */
1717*4882a593Smuzhiyun 	case USDHI6_WAIT_FOR_DATA_END:
1718*4882a593Smuzhiyun 		usdhi6_error_code(host);
1719*4882a593Smuzhiyun 		data->error = -ETIMEDOUT;
1720*4882a593Smuzhiyun 	}
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	if (mrq)
1723*4882a593Smuzhiyun 		usdhi6_request_done(host);
1724*4882a593Smuzhiyun }
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun /*			 Probe / release				*/
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun static const struct of_device_id usdhi6_of_match[] = {
1729*4882a593Smuzhiyun 	{.compatible = "renesas,usdhi6rol0"},
1730*4882a593Smuzhiyun 	{}
1731*4882a593Smuzhiyun };
1732*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, usdhi6_of_match);
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun static int usdhi6_probe(struct platform_device *pdev)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
1737*4882a593Smuzhiyun 	struct mmc_host *mmc;
1738*4882a593Smuzhiyun 	struct usdhi6_host *host;
1739*4882a593Smuzhiyun 	struct resource *res;
1740*4882a593Smuzhiyun 	int irq_cd, irq_sd, irq_sdio;
1741*4882a593Smuzhiyun 	u32 version;
1742*4882a593Smuzhiyun 	int ret;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	if (!dev->of_node)
1745*4882a593Smuzhiyun 		return -ENODEV;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	irq_cd = platform_get_irq_byname(pdev, "card detect");
1748*4882a593Smuzhiyun 	irq_sd = platform_get_irq_byname(pdev, "data");
1749*4882a593Smuzhiyun 	irq_sdio = platform_get_irq_byname(pdev, "SDIO");
1750*4882a593Smuzhiyun 	if (irq_sd < 0 || irq_sdio < 0)
1751*4882a593Smuzhiyun 		return -ENODEV;
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev);
1754*4882a593Smuzhiyun 	if (!mmc)
1755*4882a593Smuzhiyun 		return -ENOMEM;
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 	ret = mmc_regulator_get_supply(mmc);
1758*4882a593Smuzhiyun 	if (ret)
1759*4882a593Smuzhiyun 		goto e_free_mmc;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	ret = mmc_of_parse(mmc);
1762*4882a593Smuzhiyun 	if (ret < 0)
1763*4882a593Smuzhiyun 		goto e_free_mmc;
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	host		= mmc_priv(mmc);
1766*4882a593Smuzhiyun 	host->mmc	= mmc;
1767*4882a593Smuzhiyun 	host->wait	= USDHI6_WAIT_FOR_REQUEST;
1768*4882a593Smuzhiyun 	host->timeout	= msecs_to_jiffies(USDHI6_REQ_TIMEOUT_MS);
1769*4882a593Smuzhiyun 	/*
1770*4882a593Smuzhiyun 	 * We use a fixed timeout of 4s, hence inform the core about it. A
1771*4882a593Smuzhiyun 	 * future improvement should instead respect the cmd->busy_timeout.
1772*4882a593Smuzhiyun 	 */
1773*4882a593Smuzhiyun 	mmc->max_busy_timeout = USDHI6_REQ_TIMEOUT_MS;
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	host->pinctrl = devm_pinctrl_get(&pdev->dev);
1776*4882a593Smuzhiyun 	if (IS_ERR(host->pinctrl)) {
1777*4882a593Smuzhiyun 		ret = PTR_ERR(host->pinctrl);
1778*4882a593Smuzhiyun 		goto e_free_mmc;
1779*4882a593Smuzhiyun 	}
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs");
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1784*4882a593Smuzhiyun 	host->base = devm_ioremap_resource(dev, res);
1785*4882a593Smuzhiyun 	if (IS_ERR(host->base)) {
1786*4882a593Smuzhiyun 		ret = PTR_ERR(host->base);
1787*4882a593Smuzhiyun 		goto e_free_mmc;
1788*4882a593Smuzhiyun 	}
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 	host->clk = devm_clk_get(dev, NULL);
1791*4882a593Smuzhiyun 	if (IS_ERR(host->clk)) {
1792*4882a593Smuzhiyun 		ret = PTR_ERR(host->clk);
1793*4882a593Smuzhiyun 		goto e_free_mmc;
1794*4882a593Smuzhiyun 	}
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	host->imclk = clk_get_rate(host->clk);
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	ret = clk_prepare_enable(host->clk);
1799*4882a593Smuzhiyun 	if (ret < 0)
1800*4882a593Smuzhiyun 		goto e_free_mmc;
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 	version = usdhi6_read(host, USDHI6_VERSION);
1803*4882a593Smuzhiyun 	if ((version & 0xfff) != 0xa0d) {
1804*4882a593Smuzhiyun 		ret = -EPERM;
1805*4882a593Smuzhiyun 		dev_err(dev, "Version not recognized %x\n", version);
1806*4882a593Smuzhiyun 		goto e_clk_off;
1807*4882a593Smuzhiyun 	}
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 	dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n",
1810*4882a593Smuzhiyun 		 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT);
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	usdhi6_mask_all(host);
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	if (irq_cd >= 0) {
1815*4882a593Smuzhiyun 		ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0,
1816*4882a593Smuzhiyun 				       dev_name(dev), host);
1817*4882a593Smuzhiyun 		if (ret < 0)
1818*4882a593Smuzhiyun 			goto e_clk_off;
1819*4882a593Smuzhiyun 	} else {
1820*4882a593Smuzhiyun 		mmc->caps |= MMC_CAP_NEEDS_POLL;
1821*4882a593Smuzhiyun 	}
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0,
1824*4882a593Smuzhiyun 			       dev_name(dev), host);
1825*4882a593Smuzhiyun 	if (ret < 0)
1826*4882a593Smuzhiyun 		goto e_clk_off;
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0,
1829*4882a593Smuzhiyun 			       dev_name(dev), host);
1830*4882a593Smuzhiyun 	if (ret < 0)
1831*4882a593Smuzhiyun 		goto e_clk_off;
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work);
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 	usdhi6_dma_request(host, res->start);
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 	mmc->ops = &usdhi6_ops;
1838*4882a593Smuzhiyun 	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
1839*4882a593Smuzhiyun 		     MMC_CAP_SDIO_IRQ;
1840*4882a593Smuzhiyun 	/* Set .max_segs to an arbitrarily chosen value; feel free to adjust. */
1841*4882a593Smuzhiyun 	mmc->max_segs = 32;
1842*4882a593Smuzhiyun 	mmc->max_blk_size = 512;
1843*4882a593Smuzhiyun 	mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
1844*4882a593Smuzhiyun 	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
1845*4882a593Smuzhiyun 	/*
1846*4882a593Smuzhiyun 	 * Setting .max_seg_size to 1 page would simplify our page-mapping code,
1847*4882a593Smuzhiyun 	 * but OTOH having large segments makes DMA more efficient. We could
1848*4882a593Smuzhiyun 	 * check whether we managed to get DMA and fall back to 1-page
1849*4882a593Smuzhiyun 	 * segments, but if we do manage to obtain DMA and it then fails at
1850*4882a593Smuzhiyun 	 * run-time and we fall back to PIO, we will continue getting large
1851*4882a593Smuzhiyun 	 * segments. So, we wouldn't be able to get rid of the code anyway.
1852*4882a593Smuzhiyun 	 */
1853*4882a593Smuzhiyun 	mmc->max_seg_size = mmc->max_req_size;
1854*4882a593Smuzhiyun 	if (!mmc->f_max)
1855*4882a593Smuzhiyun 		mmc->f_max = host->imclk;
1856*4882a593Smuzhiyun 	mmc->f_min = host->imclk / 512;
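	/*
	 * For illustration, assuming 4 KiB pages: max_segs = 32 yields
	 * max_req_size = 32 * 4096 = 128 KiB and max_blk_count =
	 * 128 KiB / 512 = 256 blocks per request; f_min corresponds to
	 * the module clock divided by the maximum divisor of 512.
	 */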
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	platform_set_drvdata(pdev, host);
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	ret = mmc_add_host(mmc);
1861*4882a593Smuzhiyun 	if (ret < 0)
1862*4882a593Smuzhiyun 		goto e_release_dma;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	return 0;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun e_release_dma:
1867*4882a593Smuzhiyun 	usdhi6_dma_release(host);
1868*4882a593Smuzhiyun e_clk_off:
1869*4882a593Smuzhiyun 	clk_disable_unprepare(host->clk);
1870*4882a593Smuzhiyun e_free_mmc:
1871*4882a593Smuzhiyun 	mmc_free_host(mmc);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	return ret;
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun static int usdhi6_remove(struct platform_device *pdev)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun 	struct usdhi6_host *host = platform_get_drvdata(pdev);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	mmc_remove_host(host->mmc);
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	usdhi6_mask_all(host);
1883*4882a593Smuzhiyun 	cancel_delayed_work_sync(&host->timeout_work);
1884*4882a593Smuzhiyun 	usdhi6_dma_release(host);
1885*4882a593Smuzhiyun 	clk_disable_unprepare(host->clk);
1886*4882a593Smuzhiyun 	mmc_free_host(host->mmc);
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 	return 0;
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun static struct platform_driver usdhi6_driver = {
1892*4882a593Smuzhiyun 	.probe		= usdhi6_probe,
1893*4882a593Smuzhiyun 	.remove		= usdhi6_remove,
1894*4882a593Smuzhiyun 	.driver		= {
1895*4882a593Smuzhiyun 		.name	= "usdhi6rol0",
1896*4882a593Smuzhiyun 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1897*4882a593Smuzhiyun 		.of_match_table = usdhi6_of_match,
1898*4882a593Smuzhiyun 	},
1899*4882a593Smuzhiyun };
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun module_platform_driver(usdhi6_driver);
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver");
1904*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1905*4882a593Smuzhiyun MODULE_ALIAS("platform:usdhi6rol0");
1906*4882a593Smuzhiyun MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
1907