xref: /OK3568_Linux_fs/kernel/drivers/mmc/host/uniphier-sd.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun //
3*4882a593Smuzhiyun // Copyright (C) 2017-2018 Socionext Inc.
4*4882a593Smuzhiyun //   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/bitfield.h>
7*4882a593Smuzhiyun #include <linux/bitops.h>
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/dma-mapping.h>
11*4882a593Smuzhiyun #include <linux/mfd/tmio.h>
12*4882a593Smuzhiyun #include <linux/mmc/host.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/of_device.h>
16*4882a593Smuzhiyun #include <linux/pinctrl/consumer.h>
17*4882a593Smuzhiyun #include <linux/platform_device.h>
18*4882a593Smuzhiyun #include <linux/reset.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include "tmio_mmc.h"
21*4882a593Smuzhiyun 
/* UniPhier-specific bits in the standard TMIO CTL_SD_CARD_CLK_CTL register */
#define   UNIPHIER_SD_CLK_CTL_DIV1024		BIT(16)	// 1/1024 divisor (extended IP only)
#define   UNIPHIER_SD_CLK_CTL_DIV1		BIT(10)	// 1/1 (no division)
#define   UNIPHIER_SD_CLKCTL_OFFEN		BIT(9)  // auto SDCLK stop
/* UniPhier-specific extension registers (offsets from host->ctl) */
#define UNIPHIER_SD_CC_EXT_MODE		0x1b0
#define   UNIPHIER_SD_CC_EXT_MODE_DMA		BIT(1)
#define UNIPHIER_SD_HOST_MODE		0x1c8
#define UNIPHIER_SD_VOLT		0x1e4	// signal voltage control
#define   UNIPHIER_SD_VOLT_MASK			GENMASK(1, 0)
#define   UNIPHIER_SD_VOLT_OFF			0
#define   UNIPHIER_SD_VOLT_330			1	// 3.3V signal
#define   UNIPHIER_SD_VOLT_180			2	// 1.8V signal
/* built-in DMA engine (extended IP only) */
#define UNIPHIER_SD_DMA_MODE		0x410
#define   UNIPHIER_SD_DMA_MODE_DIR_MASK		GENMASK(17, 16)
#define   UNIPHIER_SD_DMA_MODE_DIR_TO_DEV	0
#define   UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV	1
#define   UNIPHIER_SD_DMA_MODE_WIDTH_MASK	GENMASK(5, 4)	// bus access width
#define   UNIPHIER_SD_DMA_MODE_WIDTH_8		0
#define   UNIPHIER_SD_DMA_MODE_WIDTH_16		1
#define   UNIPHIER_SD_DMA_MODE_WIDTH_32		2
#define   UNIPHIER_SD_DMA_MODE_WIDTH_64		3
#define   UNIPHIER_SD_DMA_MODE_ADDR_INC		BIT(0)	// 1: inc, 0: fixed
#define UNIPHIER_SD_DMA_CTL		0x414
#define   UNIPHIER_SD_DMA_CTL_START	BIT(0)	// start DMA (auto cleared)
#define UNIPHIER_SD_DMA_RST		0x418	// channel reset (active low)
#define   UNIPHIER_SD_DMA_RST_CH1	BIT(9)
#define   UNIPHIER_SD_DMA_RST_CH0	BIT(8)
#define UNIPHIER_SD_DMA_ADDR_L		0x440	// DMA bus address, low 32 bits
#define UNIPHIER_SD_DMA_ADDR_H		0x444	// DMA bus address, high 32 bits

/*
 * IP is extended to support various features: built-in DMA engine,
 * 1/1024 divisor, etc.
 */
#define UNIPHIER_SD_CAP_EXTENDED_IP		BIT(0)
/* RX channel of the built-in DMA controller is broken (Pro5) */
#define UNIPHIER_SD_CAP_BROKEN_DMA_RX		BIT(1)
58*4882a593Smuzhiyun 
/* Driver-private data, allocated per controller instance in probe. */
struct uniphier_sd_priv {
	struct tmio_mmc_data tmio_data;		/* must stay embedded: see uniphier_sd_priv() */
	struct pinctrl *pinctrl;		/* for 3.3V/1.8V pin state switching */
	struct pinctrl_state *pinstate_uhs;	/* "uhs" (1.8V) pin state */
	struct clk *clk;			/* controller clock */
	struct reset_control *rst;		/* "host" reset line */
	struct reset_control *rst_br;		/* "bridge" reset (old IP only) */
	struct reset_control *rst_hw;		/* "hw" eMMC hardware reset */
	struct dma_chan *chan;			/* external DMA channel (shared RX/TX) */
	enum dma_data_direction dma_dir;	/* direction of the in-flight mapping */
	unsigned long clk_rate;			/* current rate of ->clk */
	unsigned long caps;			/* UNIPHIER_SD_CAP_* from match data */
};
72*4882a593Smuzhiyun 
uniphier_sd_priv(struct tmio_mmc_host * host)73*4882a593Smuzhiyun static void *uniphier_sd_priv(struct tmio_mmc_host *host)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun 
uniphier_sd_dma_endisable(struct tmio_mmc_host * host,int enable)78*4882a593Smuzhiyun static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun 	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /* external DMA engine */
uniphier_sd_external_dma_issue(unsigned long arg)84*4882a593Smuzhiyun static void uniphier_sd_external_dma_issue(unsigned long arg)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun 	struct tmio_mmc_host *host = (void *)arg;
87*4882a593Smuzhiyun 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun 	uniphier_sd_dma_endisable(host, 1);
90*4882a593Smuzhiyun 	dma_async_issue_pending(priv->chan);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
/*
 * dmaengine completion callback: unmap the buffer and either arm the
 * DATAEND interrupt (success) or finish the request with a timeout error.
 */
static void uniphier_sd_external_dma_callback(void *param,
					const struct dmaengine_result *result)
{
	struct tmio_mmc_host *host = param;
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long flags;

	/* undo the dma_map_sg() done in uniphier_sd_external_dma_start() */
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);

	spin_lock_irqsave(&host->lock, flags);

	if (result->result == DMA_TRANS_NOERROR) {
		/*
		 * When the external DMA engine is enabled, strangely enough,
		 * the DATAEND flag can be asserted even if the DMA engine has
		 * not been kicked yet.  Enable the TMIO_STAT_DATAEND irq only
		 * after we make sure the DMA engine finishes the transfer,
		 * hence, in this callback.
		 */
		tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	} else {
		/* DMA failed or was aborted: complete the data stage now */
		host->data->error = -ETIMEDOUT;
		tmio_mmc_do_data_irq(host);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
121*4882a593Smuzhiyun 
/*
 * Prepare and submit an external dmaengine transfer for @data.
 * On any failure the function leaves host->dma_on false and disables
 * host DMA mode so the core falls back to PIO.
 */
static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	enum dma_transfer_direction dma_tx_dir;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int sg_len;

	if (!priv->chan)
		goto force_pio;

	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_tx_dir = DMA_DEV_TO_MEM;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_tx_dir = DMA_MEM_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
			    priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
				       dma_tx_dir, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_sg;

	/* completion is reported via callback_result, see above */
	desc->callback_result = uniphier_sd_external_dma_callback;
	desc->callback_param = host;

	cookie = dmaengine_submit(desc);
	if (cookie < 0)
		goto unmap_sg;

	host->dma_on = true;

	return;

unmap_sg:
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}
169*4882a593Smuzhiyun 
/*
 * Intentionally empty: for the external engine, DMA mode is toggled in
 * the issue tasklet and the dataend/abort paths instead.
 */
static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}
174*4882a593Smuzhiyun 
/*
 * Request the "rx-tx" dmaengine channel.  Failure is not fatal: the
 * driver silently degrades to PIO (priv->chan stays NULL).
 */
static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct dma_chan *chan;

	chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
	if (IS_ERR(chan)) {
		dev_warn(mmc_dev(host->mmc),
			 "failed to request DMA channel. falling back to PIO\n");
		return;	/* just use PIO even for -EPROBE_DEFER */
	}

	/* this driver uses a single channel for both RX and TX */
	priv->chan = chan;
	host->chan_rx = chan;
	host->chan_tx = chan;

	tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
		     (unsigned long)host);
}
196*4882a593Smuzhiyun 
uniphier_sd_external_dma_release(struct tmio_mmc_host * host)197*4882a593Smuzhiyun static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	if (priv->chan)
202*4882a593Smuzhiyun 		dma_release_channel(priv->chan);
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun 
/*
 * Abort an in-flight transfer: disable host DMA mode first, then
 * synchronously terminate the dmaengine channel.
 */
static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 0);

	if (priv->chan)
		dmaengine_terminate_sync(priv->chan);
}
214*4882a593Smuzhiyun 
/* DATAEND interrupt handler: leave DMA mode and complete the data stage. */
static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
{
	uniphier_sd_dma_endisable(host, 0);

	tmio_mmc_do_data_irq(host);
}
221*4882a593Smuzhiyun 
/* DMA ops used on the original (non-extended) IP: external dmaengine. */
static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
	.start = uniphier_sd_external_dma_start,
	.enable = uniphier_sd_external_dma_enable,
	.request = uniphier_sd_external_dma_request,
	.release = uniphier_sd_external_dma_release,
	.abort = uniphier_sd_external_dma_abort,
	.dataend = uniphier_sd_external_dma_dataend,
};
230*4882a593Smuzhiyun 
/*
 * Tasklet body for the built-in DMA engine: arm the DATAEND interrupt,
 * then enable DMA mode and pulse the start bit (auto-cleared by HW).
 */
static void uniphier_sd_internal_dma_issue(unsigned long arg)
{
	struct tmio_mmc_host *host = (void *)arg;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	spin_unlock_irqrestore(&host->lock, flags);

	uniphier_sd_dma_endisable(host, 1);
	writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
}
243*4882a593Smuzhiyun 
uniphier_sd_internal_dma_start(struct tmio_mmc_host * host,struct mmc_data * data)244*4882a593Smuzhiyun static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
245*4882a593Smuzhiyun 					   struct mmc_data *data)
246*4882a593Smuzhiyun {
247*4882a593Smuzhiyun 	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
248*4882a593Smuzhiyun 	struct scatterlist *sg = host->sg_ptr;
249*4882a593Smuzhiyun 	dma_addr_t dma_addr;
250*4882a593Smuzhiyun 	unsigned int dma_mode_dir;
251*4882a593Smuzhiyun 	u32 dma_mode;
252*4882a593Smuzhiyun 	int sg_len;
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
255*4882a593Smuzhiyun 		goto force_pio;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	if (WARN_ON(host->sg_len != 1))
258*4882a593Smuzhiyun 		goto force_pio;
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	if (!IS_ALIGNED(sg->offset, 8))
261*4882a593Smuzhiyun 		goto force_pio;
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	if (data->flags & MMC_DATA_READ) {
264*4882a593Smuzhiyun 		priv->dma_dir = DMA_FROM_DEVICE;
265*4882a593Smuzhiyun 		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
266*4882a593Smuzhiyun 	} else {
267*4882a593Smuzhiyun 		priv->dma_dir = DMA_TO_DEVICE;
268*4882a593Smuzhiyun 		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
269*4882a593Smuzhiyun 	}
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
272*4882a593Smuzhiyun 	if (sg_len == 0)
273*4882a593Smuzhiyun 		goto force_pio;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
276*4882a593Smuzhiyun 	dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
277*4882a593Smuzhiyun 			       UNIPHIER_SD_DMA_MODE_WIDTH_64);
278*4882a593Smuzhiyun 	dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun 	writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	dma_addr = sg_dma_address(data->sg);
283*4882a593Smuzhiyun 	writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
284*4882a593Smuzhiyun 	writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	host->dma_on = true;
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	return;
289*4882a593Smuzhiyun force_pio:
290*4882a593Smuzhiyun 	uniphier_sd_dma_endisable(host, 0);
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun 
/*
 * Intentionally empty: DMA mode for the built-in engine is toggled in
 * the issue tasklet and the dataend/abort paths instead.
 */
static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}
297*4882a593Smuzhiyun 
/*
 * "Request" the built-in engine.  There is no real channel to acquire;
 * host->chan_rx/tx are set to a non-NULL dummy value (0xdeadbeaf) only
 * so the tmio core considers DMA available in each direction.
 */
static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	/*
	 * Due to a hardware bug, Pro5 cannot use DMA for RX.
	 * We can still use DMA for TX, but PIO for RX.
	 */
	if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
		host->chan_rx = (void *)0xdeadbeaf;

	host->chan_tx = (void *)0xdeadbeaf;

	tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
		     (unsigned long)host);
}
315*4882a593Smuzhiyun 
/* Undo the dummy channel markers set in uniphier_sd_internal_dma_request(). */
static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
{
	/* Each value is set to zero to assume "disabling" each DMA */
	host->chan_rx = NULL;
	host->chan_tx = NULL;
}
322*4882a593Smuzhiyun 
/*
 * Abort a built-in DMA transfer: disable DMA mode, then pulse the
 * (active-low) per-channel reset bits — clear to assert reset, set to
 * release it.
 */
static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
{
	u32 tmp;

	uniphier_sd_dma_endisable(host, 0);

	tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
	tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);

	tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
}
336*4882a593Smuzhiyun 
/* DATAEND handler: leave DMA mode, unmap the single segment, complete. */
static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 0);
	/* matches the 1-entry dma_map_sg() in uniphier_sd_internal_dma_start() */
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);

	tmio_mmc_do_data_irq(host);
}
346*4882a593Smuzhiyun 
/* DMA ops used on the extended IP: built-in DMA engine. */
static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
	.start = uniphier_sd_internal_dma_start,
	.enable = uniphier_sd_internal_dma_enable,
	.request = uniphier_sd_internal_dma_request,
	.release = uniphier_sd_internal_dma_release,
	.abort = uniphier_sd_internal_dma_abort,
	.dataend = uniphier_sd_internal_dma_dataend,
};
355*4882a593Smuzhiyun 
/*
 * Enable the controller clock at its maximum rate, derive f_min/f_max,
 * and deassert the reset lines.  On failure, everything acquired so far
 * is unwound in reverse order.  Returns 0 or a negative errno.
 */
static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct mmc_host *mmc = host->mmc;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* ULONG_MAX: let the clock framework round down to the max rate */
	ret = clk_set_rate(priv->clk, ULONG_MAX);
	if (ret)
		goto disable_clk;

	priv->clk_rate = clk_get_rate(priv->clk);

	/* If max-frequency property is set, use it. */
	if (!mmc->f_max)
		mmc->f_max = priv->clk_rate;

	/*
	 * 1/512 is the finest divisor in the original IP.  Newer versions
	 * also supports 1/1024 divisor. (UniPhier-specific extension)
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		mmc->f_min = priv->clk_rate / 1024;
	else
		mmc->f_min = priv->clk_rate / 512;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto disable_clk;

	/* rst_br is NULL on the extended IP; deassert is then a no-op */
	ret = reset_control_deassert(priv->rst_br);
	if (ret)
		goto assert_rst;

	return 0;

assert_rst:
	reset_control_assert(priv->rst);
disable_clk:
	clk_disable_unprepare(priv->clk);

	return ret;
}
402*4882a593Smuzhiyun 
/* Reverse of uniphier_sd_clk_enable(): assert resets, then gate the clock. */
static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	reset_control_assert(priv->rst_br);
	reset_control_assert(priv->rst);
	clk_disable_unprepare(priv->clk);
}
411*4882a593Smuzhiyun 
/* Pulse the dedicated "hw" reset line to hardware-reset the eMMC device. */
static void uniphier_sd_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	reset_control_assert(priv->rst_hw);
	/* For eMMC, minimum is 1us but give it 9us for good measure */
	udelay(9);
	reset_control_deassert(priv->rst_hw);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}
424*4882a593Smuzhiyun 
/*
 * Program the SD card clock to at most @clock Hz.  The clock is gated
 * while the divisor changes and re-enabled afterwards; @clock == 0
 * simply leaves the clock stopped.
 */
static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
				  unsigned int clock)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long divisor;
	u32 tmp;

	/* bus_shift is 1, hence the register offset is doubled */
	tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	/* stop the clock before changing its rate to avoid a glitch signal */
	tmp &= ~CLK_CTL_SCLKEN;
	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	if (clock == 0)
		return;

	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
	tmp &= ~CLK_CTL_DIV_MASK;

	divisor = priv->clk_rate / clock;

	/*
	 * In the original IP, bit[7:0] represents the divisor.
	 * bit7 set: 1/512, ... bit0 set:1/4, all bits clear: 1/2
	 *
	 * The IP does not define a way to achieve 1/1.  For UniPhier variants,
	 * bit10 is used for 1/1.  Newer versions of UniPhier variants use
	 * bit16 for 1/1024.
	 */
	if (divisor <= 1)
		tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
	else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
		tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
	else
		tmp |= roundup_pow_of_two(divisor) >> 2;

	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	tmp |= CLK_CTL_SCLKEN;
	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}
467*4882a593Smuzhiyun 
/* One-time controller setup: bus-mode register and auto clock gating. */
static void uniphier_sd_host_init(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	u32 val;

	/*
	 * Connected to 32bit AXI.
	 * This register holds settings for SoC-specific internal bus
	 * connection.  What is worse, the register spec was changed,
	 * breaking the backward compatibility.  Write an appropriate
	 * value depending on a flag associated with a compatible string.
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		val = 0x00000101;
	else
		val = 0x00000000;

	writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);

	val = 0;
	/*
	 * If supported, the controller can automatically
	 * enable/disable the clock line to the card.
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		val |= UNIPHIER_SD_CLKCTL_OFFEN;

	writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}
497*4882a593Smuzhiyun 
/*
 * .start_signal_voltage_switch callback: program the VOLT register and
 * select the matching pinctrl state (1.8V uses "uhs", 3.3V the default).
 * Returns 0 on success, -ENOTSUPP for unsupported voltages.
 */
static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct pinctrl_state *pinstate = NULL;
	u32 val, tmp;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val = UNIPHIER_SD_VOLT_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		val = UNIPHIER_SD_VOLT_180;
		pinstate = priv->pinstate_uhs;
		break;
	default:
		return -ENOTSUPP;
	}

	/* read-modify-write: only touch the voltage field */
	tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
	tmp &= ~UNIPHIER_SD_VOLT_MASK;
	tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
	writel(tmp, host->ctl + UNIPHIER_SD_VOLT);

	if (pinstate)
		pinctrl_select_state(priv->pinctrl, pinstate);
	else
		pinctrl_select_default_state(mmc_dev(mmc));

	return 0;
}
530*4882a593Smuzhiyun 
uniphier_sd_uhs_init(struct tmio_mmc_host * host,struct uniphier_sd_priv * priv)531*4882a593Smuzhiyun static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
532*4882a593Smuzhiyun 				struct uniphier_sd_priv *priv)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun 	priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
535*4882a593Smuzhiyun 	if (IS_ERR(priv->pinctrl))
536*4882a593Smuzhiyun 		return PTR_ERR(priv->pinctrl);
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
539*4882a593Smuzhiyun 	if (IS_ERR(priv->pinstate_uhs))
540*4882a593Smuzhiyun 		return PTR_ERR(priv->pinstate_uhs);
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	host->ops.start_signal_voltage_switch =
543*4882a593Smuzhiyun 					uniphier_sd_start_signal_voltage_switch;
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	return 0;
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun 
/*
 * Probe: gather resources (irq, clock, resets), allocate the tmio host,
 * pick the DMA ops matching the IP generation, and register the host.
 * Error paths unwind in strict reverse order via the labels at the end.
 */
static int uniphier_sd_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_sd_priv *priv;
	struct tmio_mmc_data *tmio_data;
	struct tmio_mmc_host *host;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* per-compatible UNIPHIER_SD_CAP_* flags from the match table */
	priv->caps = (unsigned long)of_device_get_match_data(dev);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	priv->rst = devm_reset_control_get_shared(dev, "host");
	if (IS_ERR(priv->rst)) {
		dev_err(dev, "failed to get host reset\n");
		return PTR_ERR(priv->rst);
	}

	/* old version has one more reset */
	if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
		priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
		if (IS_ERR(priv->rst_br)) {
			dev_err(dev, "failed to get bridge reset\n");
			return PTR_ERR(priv->rst_br);
		}
	}

	tmio_data = &priv->tmio_data;
	tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;

	host = tmio_mmc_host_alloc(pdev, tmio_data);
	if (IS_ERR(host))
		return PTR_ERR(host);

	if (host->mmc->caps & MMC_CAP_HW_RESET) {
		priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
		if (IS_ERR(priv->rst_hw)) {
			dev_err(dev, "failed to get hw reset\n");
			ret = PTR_ERR(priv->rst_hw);
			goto free_host;
		}
		host->ops.hw_reset = uniphier_sd_hw_reset;
	}

	/* UHS setup failure is not fatal: fall back to non-UHS operation */
	if (host->mmc->caps & MMC_CAP_UHS) {
		ret = uniphier_sd_uhs_init(host, priv);
		if (ret) {
			dev_warn(dev,
				 "failed to setup UHS (error %d).  Disabling UHS.",
				 ret);
			host->mmc->caps &= ~MMC_CAP_UHS;
		}
	}

	/* extended IP has a built-in engine; older IP uses dmaengine */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		host->dma_ops = &uniphier_sd_internal_dma_ops;
	else
		host->dma_ops = &uniphier_sd_external_dma_ops;

	host->bus_shift = 1;
	host->clk_enable = uniphier_sd_clk_enable;
	host->clk_disable = uniphier_sd_clk_disable;
	host->set_clock = uniphier_sd_set_clock;

	ret = uniphier_sd_clk_enable(host);
	if (ret)
		goto free_host;

	uniphier_sd_host_init(host);

	tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;
	if (host->mmc->caps & MMC_CAP_UHS)
		tmio_data->ocr_mask |= MMC_VDD_165_195;

	tmio_data->max_segs = 1;
	tmio_data->max_blk_count = U16_MAX;

	ret = tmio_mmc_host_probe(host);
	if (ret)
		goto disable_clk;

	ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
			       dev_name(dev), host);
	if (ret)
		goto remove_host;

	return 0;

remove_host:
	tmio_mmc_host_remove(host);
disable_clk:
	uniphier_sd_clk_disable(host);
free_host:
	tmio_mmc_host_free(host);

	return ret;
}
657*4882a593Smuzhiyun 
/* Remove: tear down in reverse probe order (host, clock/resets, memory). */
static int uniphier_sd_remove(struct platform_device *pdev)
{
	struct tmio_mmc_host *host = platform_get_drvdata(pdev);

	tmio_mmc_host_remove(host);
	uniphier_sd_clk_disable(host);
	tmio_mmc_host_free(host);

	return 0;
}
668*4882a593Smuzhiyun 
/*
 * Match table.  .data carries UNIPHIER_SD_CAP_* flags: v2.91 is the
 * original IP (no flags), v3.1 is the extended IP with the broken DMA
 * RX channel (Pro5), v3.1.1 is the extended IP with working DMA RX.
 */
static const struct of_device_id uniphier_sd_match[] = {
	{
		.compatible = "socionext,uniphier-sd-v2.91",
	},
	{
		.compatible = "socionext,uniphier-sd-v3.1",
		.data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP |
				 UNIPHIER_SD_CAP_BROKEN_DMA_RX),
	},
	{
		.compatible = "socionext,uniphier-sd-v3.1.1",
		.data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_sd_match);
685*4882a593Smuzhiyun 
/* Platform driver registration; module init/exit are generated below. */
static struct platform_driver uniphier_sd_driver = {
	.probe = uniphier_sd_probe,
	.remove = uniphier_sd_remove,
	.driver = {
		.name = "uniphier-sd",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = uniphier_sd_match,
	},
};
module_platform_driver(uniphier_sd_driver);
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
698*4882a593Smuzhiyun MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver");
699*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
700