// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Freescale eSDHC controller driver.
 *
 * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Copyright 2020 NXP
 *
 * Authors: Xiaobo Xie <X.Xie@freescale.com>
 *          Anton Vorontsov <avorontsov@ru.mvista.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/sys_soc.h>
#include <linux/clk.h>
#include <linux/ktime.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"

#define VENDOR_V_22     0x12
#define VENDOR_V_23     0x13

#define MMC_TIMING_NUM (MMC_TIMING_MMC_HS400 + 1)

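/*
 * Per-SoC clock limits applied in esdhc_of_set_clock(): a default maximum
 * clock for SD cards in legacy timing, plus a maximum clock per bus timing
 * mode indexed by MMC_TIMING_*. An entry of 0 leaves the requested clock
 * unchanged for that mode.
 */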
struct esdhc_clk_fixup {
        const unsigned int sd_dflt_max_clk;
        const unsigned int max_clk[MMC_TIMING_NUM];
};

static const struct esdhc_clk_fixup ls1021a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_MMC_HS] = 46500000,
        .max_clk[MMC_TIMING_SD_HS] = 46500000,
};

static const struct esdhc_clk_fixup ls1046a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_UHS_SDR104] = 167000000,
        .max_clk[MMC_TIMING_MMC_HS200] = 167000000,
};

static const struct esdhc_clk_fixup ls1012a_esdhc_clk = {
        .sd_dflt_max_clk = 25000000,
        .max_clk[MMC_TIMING_UHS_SDR104] = 125000000,
        .max_clk[MMC_TIMING_MMC_HS200] = 125000000,
};

static const struct esdhc_clk_fixup p1010_esdhc_clk = {
        .sd_dflt_max_clk = 20000000,
        .max_clk[MMC_TIMING_LEGACY] = 20000000,
        .max_clk[MMC_TIMING_MMC_HS] = 42000000,
        .max_clk[MMC_TIMING_SD_HS] = 40000000,
};

static const struct of_device_id sdhci_esdhc_of_match[] = {
        { .compatible = "fsl,ls1021a-esdhc", .data = &ls1021a_esdhc_clk},
        { .compatible = "fsl,ls1046a-esdhc", .data = &ls1046a_esdhc_clk},
        { .compatible = "fsl,ls1012a-esdhc", .data = &ls1012a_esdhc_clk},
        { .compatible = "fsl,p1010-esdhc", .data = &p1010_esdhc_clk},
        { .compatible = "fsl,mpc8379-esdhc" },
        { .compatible = "fsl,mpc8536-esdhc" },
        { .compatible = "fsl,esdhc" },
        { }
};
MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);

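/*
 * Driver private data kept in the sdhci_pltfm private area: cached vendor
 * and spec versions, erratum quirk flags, software tuning state, the
 * optional peripheral clock rate, the per-SoC clock fixup table and the
 * currently programmed clock divider ratio.
 */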
struct sdhci_esdhc {
        u8 vendor_ver;
        u8 spec_ver;
        bool quirk_incorrect_hostver;
        bool quirk_limited_clk_division;
        bool quirk_unreliable_pulse_detection;
        bool quirk_tuning_erratum_type1;
        bool quirk_tuning_erratum_type2;
        bool quirk_ignore_data_inhibit;
        bool quirk_delay_before_data_reset;
        bool quirk_trans_complete_erratum;
        bool in_sw_tuning;
        unsigned int peripheral_clock;
        const struct esdhc_clk_fixup *clk_fixup;
        u32 div_ratio;
};

/**
 * esdhc_read*_fixup - Fix up the value read from an incompatible eSDHC
 *                     register to make it compatible with the SD spec.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 32-bit eSDHC register value on spec_reg address
 *
 * In the SD spec, there are 8/16/32/64-bit registers, while all eSDHC
 * registers are 32 bits wide. There are differences in register size,
 * register address, register function, bit position and function between
 * the eSDHC spec and the SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_readl_fixup(struct sdhci_host *host,
                             int spec_reg, u32 value)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 ret;

        /*
         * The ADMA flag bit in eSDHC is not compatible with the standard
         * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA
         * is supported by the eSDHC.
         * On many FSL eSDHC controllers the reset value of the
         * SDHCI_CAN_DO_ADMA1 field is 1, but some of them cannot support
         * ADMA; only controllers whose vendor version is greater than
         * 2.2/0x12 support ADMA.
         */
        if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
                if (esdhc->vendor_ver > VENDOR_V_22) {
                        ret = value | SDHCI_CAN_DO_ADMA2;
                        return ret;
                }
        }
        /*
         * The DAT[3:0] line signal levels and the CMD line signal level are
         * not compatible with the standard SDHC register. The line signal
         * levels DAT[7:0] are at bits 31:24 and the command line signal
         * level is at bit 23. All other bits are the same as in the
         * standard SDHC register.
         */
        if (spec_reg == SDHCI_PRESENT_STATE) {
                ret = value & 0x000fffff;
                ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
                ret |= (value << 1) & SDHCI_CMD_LVL;
                return ret;
        }

        /*
         * DTS properties of the mmc host are used to enable each speed mode
         * according to SoC and board capability. So clean up the
         * SDR50/SDR104/DDR50 support bits here.
         */
        if (spec_reg == SDHCI_CAPABILITIES_1) {
                ret = value & ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
                                SDHCI_SUPPORT_DDR50);
                return ret;
        }

        /*
         * Some controllers have an unreliable Data Line Active
         * bit for commands with busy signal. This affects the
         * Command Inhibit (data) bit. Just ignore it since the
         * MMC core driver has already polled the card status
         * with CMD13 after any command with busy signal.
         */
        if ((spec_reg == SDHCI_PRESENT_STATE) &&
            (esdhc->quirk_ignore_data_inhibit == true)) {
                ret = value & ~SDHCI_DATA_INHIBIT;
                return ret;
        }

        ret = value;
        return ret;
}

static u16 esdhc_readw_fixup(struct sdhci_host *host,
                             int spec_reg, u32 value)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u16 ret;
        int shift = (spec_reg & 0x2) * 8;

        if (spec_reg == SDHCI_TRANSFER_MODE)
                return pltfm_host->xfer_mode_shadow;

        if (spec_reg == SDHCI_HOST_VERSION)
                ret = value & 0xffff;
        else
                ret = (value >> shift) & 0xffff;
        /* Workaround for T4240-R1.0-R2.0 eSDHC which has incorrect
         * vendor version and spec version information.
         */
        if ((spec_reg == SDHCI_HOST_VERSION) &&
            (esdhc->quirk_incorrect_hostver))
                ret = (VENDOR_V_23 << SDHCI_VENDOR_VER_SHIFT) | SDHCI_SPEC_200;
        return ret;
}

static u8 esdhc_readb_fixup(struct sdhci_host *host,
                            int spec_reg, u32 value)
{
        u8 ret;
        u8 dma_bits;
        int shift = (spec_reg & 0x3) * 8;

        ret = (value >> shift) & 0xff;

        /*
         * "DMA select" is located at offset 0x28 in the SD specification,
         * but on P5020 or P3041 it is located at 0x29.
         */
        if (spec_reg == SDHCI_HOST_CONTROL) {
                /* DMA select is bits 22,23 in the Protocol Control Register */
                dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
                /* fixup the result */
                ret &= ~SDHCI_CTRL_DMA_MASK;
                ret |= dma_bits;
        }
        return ret;
}

/**
 * esdhc_write*_fixup - Fix up the SD spec register value so that it can be
 *                      written into the eSDHC register.
 *
 * @host: pointer to sdhci_host
 * @spec_reg: SD spec register address
 * @value: 8/16/32-bit SD spec register value that would be written
 * @old_value: 32-bit eSDHC register value on spec_reg address
 *
 * In the SD spec, there are 8/16/32/64-bit registers, while all eSDHC
 * registers are 32 bits wide. There are differences in register size,
 * register address, register function, bit position and function between
 * the eSDHC spec and the SD spec.
 *
 * Return a fixed up register value
 */
static u32 esdhc_writel_fixup(struct sdhci_host *host,
                              int spec_reg, u32 value, u32 old_value)
{
        u32 ret;

        /*
         * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
         * when SYSCTL[RSTD] is set for some special operations.
         * It has no impact on other operations.
         */
        if (spec_reg == SDHCI_INT_ENABLE)
                ret = value | SDHCI_INT_BLK_GAP;
        else
                ret = value;

        return ret;
}

static u32 esdhc_writew_fixup(struct sdhci_host *host,
                              int spec_reg, u16 value, u32 old_value)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        int shift = (spec_reg & 0x2) * 8;
        u32 ret;

        switch (spec_reg) {
        case SDHCI_TRANSFER_MODE:
                /*
                 * Postpone this write, we must do it together with a
                 * command write that is down below. Return old value.
                 */
                pltfm_host->xfer_mode_shadow = value;
                return old_value;
        case SDHCI_COMMAND:
                ret = (value << 16) | pltfm_host->xfer_mode_shadow;
                return ret;
        }

        ret = old_value & (~(0xffff << shift));
        ret |= (value << shift);

        if (spec_reg == SDHCI_BLOCK_SIZE) {
                /*
                 * The last two DMA boundary bits are reserved, and the
                 * first one is used for a non-standard blksz of 4096
                 * bytes that we don't support yet. So clear the DMA
                 * boundary bits.
                 */
                ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
        }
        return ret;
}

static u32 esdhc_writeb_fixup(struct sdhci_host *host,
                              int spec_reg, u8 value, u32 old_value)
{
        u32 ret;
        u32 dma_bits;
        u8 tmp;
        int shift = (spec_reg & 0x3) * 8;

        /*
         * eSDHC doesn't have a standard power control register, so we do
         * nothing here to avoid incorrect operation.
         */
        if (spec_reg == SDHCI_POWER_CONTROL)
                return old_value;
        /*
         * "DMA select" location is offset 0x28 in SD specification, but on
         * P5020 or P3041, it's located at 0x29.
         */
        if (spec_reg == SDHCI_HOST_CONTROL) {
                /*
                 * If host control register is not standard, exit
                 * this function
                 */
                if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
                        return old_value;

                /* DMA select is 22,23 bits in Protocol Control Register */
                dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
                ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
                tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
                      (old_value & SDHCI_CTRL_DMA_MASK);
                ret = (ret & (~0xff)) | tmp;

                /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
                ret &= ~ESDHC_HOST_CONTROL_RES;
                return ret;
        }

        ret = (old_value & (~(0xff << shift))) | (value << shift);
        return ret;
}

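/*
 * Big-endian and little-endian register accessors. The eSDHC only supports
 * 32-bit accesses, so every 8/16-bit SDHCI access is widened to a 32-bit
 * read of the containing word and run through the read/write fixup helpers
 * above.
 */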
static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
{
        u32 ret;
        u32 value;

        if (reg == SDHCI_CAPABILITIES_1)
                value = ioread32be(host->ioaddr + ESDHC_CAPABILITIES_1);
        else
                value = ioread32be(host->ioaddr + reg);

        ret = esdhc_readl_fixup(host, reg, value);

        return ret;
}

static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
{
        u32 ret;
        u32 value;

        if (reg == SDHCI_CAPABILITIES_1)
                value = ioread32(host->ioaddr + ESDHC_CAPABILITIES_1);
        else
                value = ioread32(host->ioaddr + reg);

        ret = esdhc_readl_fixup(host, reg, value);

        return ret;
}

static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
{
        u16 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_readw_fixup(host, reg, value);
        return ret;
}

static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
{
        u16 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_readw_fixup(host, reg, value);
        return ret;
}

static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
{
        u8 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_readb_fixup(host, reg, value);
        return ret;
}

static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
{
        u8 ret;
        u32 value;
        int base = reg & ~0x3;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_readb_fixup(host, reg, value);
        return ret;
}

static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
{
        u32 value;

        value = esdhc_writel_fixup(host, reg, val, 0);
        iowrite32be(value, host->ioaddr + reg);
}

static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
{
        u32 value;

        value = esdhc_writel_fixup(host, reg, val, 0);
        iowrite32(value, host->ioaddr + reg);
}

static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_writew_fixup(host, reg, val, value);
        if (reg != SDHCI_TRANSFER_MODE)
                iowrite32be(ret, host->ioaddr + base);

        /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
         * 1 us after ESDHC_EXTN is set.
         */
        if (base == ESDHC_SYSTEM_CONTROL_2) {
                if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
                    esdhc->in_sw_tuning) {
                        udelay(1);
                        ret |= ESDHC_SMPCLKSEL;
                        iowrite32be(ret, host->ioaddr + base);
                }
        }
}

static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_writew_fixup(host, reg, val, value);
        if (reg != SDHCI_TRANSFER_MODE)
                iowrite32(ret, host->ioaddr + base);

        /* Starting SW tuning requires ESDHC_SMPCLKSEL to be set
         * 1 us after ESDHC_EXTN is set.
         */
        if (base == ESDHC_SYSTEM_CONTROL_2) {
                if (!(value & ESDHC_EXTN) && (ret & ESDHC_EXTN) &&
                    esdhc->in_sw_tuning) {
                        udelay(1);
                        ret |= ESDHC_SMPCLKSEL;
                        iowrite32(ret, host->ioaddr + base);
                }
        }
}

static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
{
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32be(host->ioaddr + base);
        ret = esdhc_writeb_fixup(host, reg, val, value);
        iowrite32be(ret, host->ioaddr + base);
}

static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
{
        int base = reg & ~0x3;
        u32 value;
        u32 ret;

        value = ioread32(host->ioaddr + base);
        ret = esdhc_writeb_fixup(host, reg, val, value);
        iowrite32(ret, host->ioaddr + base);
}

/*
 * For Abort or Suspend after Stop at Block Gap, ignore the ADMA
 * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC])
 * and Block Gap Event (IRQSTAT[BGE]) are also set.
 * For Continue, apply a soft reset for data (SYSCTL[RSTD])
 * and re-issue the entire read transaction from the beginning.
 */
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        bool applicable;
        dma_addr_t dmastart;
        dma_addr_t dmanow;

        applicable = (intmask & SDHCI_INT_DATA_END) &&
                     (intmask & SDHCI_INT_BLK_GAP) &&
                     (esdhc->vendor_ver == VENDOR_V_23);
        if (!applicable)
                return;

        host->data->error = 0;
        dmastart = sg_dma_address(host->data->sg);
        dmanow = dmastart + host->data->bytes_xfered;
        /*
         * Force update to the next DMA block boundary.
         */
        dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
                 SDHCI_DEFAULT_BOUNDARY_SIZE;
        host->data->bytes_xfered = dmanow - dmastart;
        sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
}

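/*
 * Enable DMA: LS1043A/LS1046A need a 40-bit DMA mask, and DMA snooping is
 * enabled or disabled in ESDHC_DMA_SYSCTL based on the dma-coherent device
 * tree property.
 */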
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
        int ret;
        u32 value;
        struct device *dev = mmc_dev(host->mmc);

        if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
            of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                if (ret)
                        return ret;
        }

        value = sdhci_readl(host, ESDHC_DMA_SYSCTL);

        if (of_dma_is_coherent(dev->of_node))
                value |= ESDHC_DMA_SNOOP;
        else
                value &= ~ESDHC_DMA_SNOOP;

        sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
        return 0;
}

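/*
 * The base clock reported to the core is the peripheral clock when one was
 * found, otherwise the platform clock.
 */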
static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);

        if (esdhc->peripheral_clock)
                return esdhc->peripheral_clock;
        else
                return pltfm_host->clock;
}

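/*
 * The minimum SD clock is the base clock divided by the largest available
 * divider: a prescaler of 256 times a divisor of 16.
 */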
static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        unsigned int clock;

        if (esdhc->peripheral_clock)
                clock = esdhc->peripheral_clock;
        else
                clock = pltfm_host->clock;
        return clock / 256 / 16;
}

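/*
 * Gate or ungate the SD clock (plus the IPG/HCK/PER clocks on vendor
 * version 2.2 or lower), then wait up to 20 ms for the clock stable bit on
 * controllers newer than 2.2, where that bit exists.
 */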
static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        ktime_t timeout;
        u32 val, clk_en;

        clk_en = ESDHC_CLOCK_SDCLKEN;

        /*
         * The IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version
         * is 2.2 or lower.
         */
        if (esdhc->vendor_ver <= VENDOR_V_22)
                clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN |
                           ESDHC_CLOCK_PEREN);

        val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);

        if (enable)
                val |= clk_en;
        else
                val &= ~clk_en;

        sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL);

        /*
         * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
         * wait for the clock stable bit, which does not exist.
         */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (esdhc->vendor_ver > VENDOR_V_22) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }
}

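/*
 * Set DMA_SYSCTL[FLUSH_ASYNC_FIFO] and wait up to 20 ms for the controller
 * to clear it again, i.e. for the asynchronous FIFO flush to complete.
 */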
static void esdhc_flush_async_fifo(struct sdhci_host *host)
{
        ktime_t timeout;
        u32 val;

        val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
        val |= ESDHC_FLUSH_ASYNC_FIFO;
        sdhci_writel(host, val, ESDHC_DMA_SYSCTL);

        /* Wait max 20 ms */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (1) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (!(sdhci_readl(host, ESDHC_DMA_SYSCTL) &
                      ESDHC_FLUSH_ASYNC_FIFO))
                        break;
                if (timedout) {
                        pr_err("%s: flushing asynchronous FIFO timeout.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }
}

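/*
 * Program the SD clock. The output clock is
 * host->max_clk / (pre_div * div), with pre_div a power of two up to 256
 * and div between 1 and 16. The requested rate is first capped by the
 * per-SoC fixup table, and HS400 additionally has its DLL configured and
 * locked once the divider is set.
 */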
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        unsigned int pre_div = 1, div = 1;
        unsigned int clock_fixup = 0;
        ktime_t timeout;
        u32 temp;

        if (clock == 0) {
                host->mmc->actual_clock = 0;
                esdhc_clock_enable(host, false);
                return;
        }

        /* Start pre_div at 2 for vendor version < 2.3. */
        if (esdhc->vendor_ver < VENDOR_V_23)
                pre_div = 2;

        /* Fix clock value. */
        if (host->mmc->card && mmc_card_sd(host->mmc->card) &&
            esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY)
                clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk;
        else if (esdhc->clk_fixup)
                clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing];

        if (clock_fixup == 0 || clock < clock_fixup)
                clock_fixup = clock;

        /* Calculate pre_div and div. */
        while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256)
                pre_div *= 2;

        while (host->max_clk / pre_div / div > clock_fixup && div < 16)
                div++;

        esdhc->div_ratio = pre_div * div;

        /* Limit clock division for HS400 200MHz clock for quirk. */
        if (esdhc->quirk_limited_clk_division &&
            clock == MMC_HS200_MAX_DTR &&
            (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 ||
             host->flags & SDHCI_HS400_TUNING)) {
                if (esdhc->div_ratio <= 4) {
                        pre_div = 4;
                        div = 1;
                } else if (esdhc->div_ratio <= 8) {
                        pre_div = 4;
                        div = 2;
                } else if (esdhc->div_ratio <= 12) {
                        pre_div = 4;
                        div = 3;
                } else {
                        pr_warn("%s: using unsupported clock division.\n",
                                mmc_hostname(host->mmc));
                }
                esdhc->div_ratio = pre_div * div;
        }

        host->mmc->actual_clock = host->max_clk / esdhc->div_ratio;

        dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
                clock, host->mmc->actual_clock);

        /* Set clock division into register. */
        pre_div >>= 1;
        div--;

        esdhc_clock_enable(host, false);

        temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
        temp &= ~ESDHC_CLOCK_MASK;
        temp |= ((div << ESDHC_DIVIDER_SHIFT) |
                 (pre_div << ESDHC_PREDIV_SHIFT));
        sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);

        /*
         * Wait max 20 ms. If the vendor version is 2.2 or lower, do not
         * wait for the clock stable bit, which does not exist.
         */
        timeout = ktime_add_ms(ktime_get(), 20);
        while (esdhc->vendor_ver > VENDOR_V_22) {
                bool timedout = ktime_after(ktime_get(), timeout);

                if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
                        break;
                if (timedout) {
                        pr_err("%s: Internal clock never stabilised.\n",
                               mmc_hostname(host->mmc));
                        break;
                }
                usleep_range(10, 20);
        }

        /* Additional setting for HS400. */
        if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
            clock == MMC_HS200_MAX_DTR) {
                temp = sdhci_readl(host, ESDHC_TBCTL);
                sdhci_writel(host, temp | ESDHC_HS400_MODE, ESDHC_TBCTL);
                temp = sdhci_readl(host, ESDHC_SDCLKCTL);
                sdhci_writel(host, temp | ESDHC_CMD_CLK_CTL, ESDHC_SDCLKCTL);
                esdhc_clock_enable(host, true);

                temp = sdhci_readl(host, ESDHC_DLLCFG0);
                temp |= ESDHC_DLL_ENABLE;
                if (host->mmc->actual_clock == MMC_HS200_MAX_DTR)
                        temp |= ESDHC_DLL_FREQ_SEL;
                sdhci_writel(host, temp, ESDHC_DLLCFG0);

                temp |= ESDHC_DLL_RESET;
                sdhci_writel(host, temp, ESDHC_DLLCFG0);
                udelay(1);
                temp &= ~ESDHC_DLL_RESET;
                sdhci_writel(host, temp, ESDHC_DLLCFG0);

                /* Wait max 20 ms */
                if (read_poll_timeout(sdhci_readl, temp,
                                      temp & ESDHC_DLL_STS_SLV_LOCK,
                                      10, 20000, false,
                                      host, ESDHC_DLLSTAT0))
                        pr_err("%s: timeout for delay chain lock.\n",
                               mmc_hostname(host->mmc));

                temp = sdhci_readl(host, ESDHC_TBCTL);
                sdhci_writel(host, temp | ESDHC_HS400_WNDW_ADJUST, ESDHC_TBCTL);

                esdhc_clock_enable(host, false);
                esdhc_flush_async_fifo(host);
        }
        esdhc_clock_enable(host, true);
}

static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
{
        u32 ctrl;

        ctrl = sdhci_readl(host, ESDHC_PROCTL);
        ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
        switch (width) {
        case MMC_BUS_WIDTH_8:
                ctrl |= ESDHC_CTRL_8BITBUS;
                break;

        case MMC_BUS_WIDTH_4:
                ctrl |= ESDHC_CTRL_4BITBUS;
                break;

        default:
                break;
        }

        sdhci_writel(host, ctrl, ESDHC_PROCTL);
}

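/*
 * Controller reset with eSDHC-specific workarounds: an optional delay
 * before a data reset, save/restore of the bus width and interrupt enables
 * on vendor version 2.2 or lower, and clearing of the tuning block and
 * pulse-stretch bits after a full reset on SDHCI 3.0+ controllers.
 */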
static void esdhc_reset(struct sdhci_host *host, u8 mask)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 val, bus_width = 0;

        /*
         * Add a delay to make sure that all the DMA transfers are finished
         * for the quirk.
         */
        if (esdhc->quirk_delay_before_data_reset &&
            (mask & SDHCI_RESET_DATA) &&
            (host->flags & SDHCI_REQ_USE_DMA))
                mdelay(5);

        /*
         * Save the bus-width for eSDHC whose vendor version is 2.2
         * or lower for data reset.
         */
        if ((mask & SDHCI_RESET_DATA) &&
            (esdhc->vendor_ver <= VENDOR_V_22)) {
                val = sdhci_readl(host, ESDHC_PROCTL);
                bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK;
        }

        sdhci_reset(host, mask);

        /*
         * Restore the bus-width setting and interrupt registers for eSDHC
         * whose vendor version is 2.2 or lower for data reset.
         */
        if ((mask & SDHCI_RESET_DATA) &&
            (esdhc->vendor_ver <= VENDOR_V_22)) {
                val = sdhci_readl(host, ESDHC_PROCTL);
                val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
                val |= bus_width;
                sdhci_writel(host, val, ESDHC_PROCTL);

                sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
                sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
        }

        /*
         * Some bits have to be cleared manually for eSDHC whose spec
         * version is 3.0 or higher on a full reset.
         */
        if ((mask & SDHCI_RESET_ALL) &&
            (esdhc->spec_ver >= SDHCI_SPEC_300)) {
                val = sdhci_readl(host, ESDHC_TBCTL);
                val &= ~ESDHC_TB_EN;
                sdhci_writel(host, val, ESDHC_TBCTL);

                /*
                 * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to
                 * 0 for the quirk.
                 */
                if (esdhc->quirk_unreliable_pulse_detection) {
                        val = sdhci_readl(host, ESDHC_DLLCFG1);
                        val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL;
                        sdhci_writel(host, val, ESDHC_DLLCFG1);
                }
        }
}

/* The SCFG, Supplemental Configuration Unit, provides SoC specific
 * configuration and status registers for the device. There is an
 * SDHC IO VSEL control register on SCFG for some platforms. It's
 * used to support SDHC IO voltage switching.
 */
static const struct of_device_id scfg_device_ids[] = {
        { .compatible = "fsl,t1040-scfg", },
        { .compatible = "fsl,ls1012a-scfg", },
        { .compatible = "fsl,ls1046a-scfg", },
        {}
};

/* SDHC IO VSEL control register definition */
#define SCFG_SDHCIOVSELCR       0x408
#define SDHCIOVSELCR_TGLEN      0x80000000
#define SDHCIOVSELCR_VSELVAL    0x60000000
#define SDHCIOVSELCR_SDHC_VS    0x00000001

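/*
 * Switch the I/O signalling voltage via ESDHC_PROCTL[VOLT_SEL]; platforms
 * with an SCFG SDHCIOVSELCR register also program it when switching to
 * 1.8 V.
 */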
static int esdhc_signal_voltage_switch(struct mmc_host *mmc,
                                       struct mmc_ios *ios)
{
        struct sdhci_host *host = mmc_priv(mmc);
        struct device_node *scfg_node;
        void __iomem *scfg_base = NULL;
        u32 sdhciovselcr;
        u32 val;

        /*
         * Signal Voltage Switching is only applicable for Host Controllers
         * v3.00 and above.
         */
        if (host->version < SDHCI_SPEC_300)
                return 0;

        val = sdhci_readl(host, ESDHC_PROCTL);

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                val &= ~ESDHC_VOLT_SEL;
                sdhci_writel(host, val, ESDHC_PROCTL);
                return 0;
        case MMC_SIGNAL_VOLTAGE_180:
                scfg_node = of_find_matching_node(NULL, scfg_device_ids);
                if (scfg_node)
                        scfg_base = of_iomap(scfg_node, 0);
                of_node_put(scfg_node);
                if (scfg_base) {
                        sdhciovselcr = SDHCIOVSELCR_TGLEN |
                                       SDHCIOVSELCR_VSELVAL;
                        iowrite32be(sdhciovselcr,
                                    scfg_base + SCFG_SDHCIOVSELCR);

                        val |= ESDHC_VOLT_SEL;
                        sdhci_writel(host, val, ESDHC_PROCTL);
                        mdelay(5);

                        sdhciovselcr = SDHCIOVSELCR_TGLEN |
                                       SDHCIOVSELCR_SDHC_VS;
                        iowrite32be(sdhciovselcr,
                                    scfg_base + SCFG_SDHCIOVSELCR);
                        iounmap(scfg_base);
                } else {
                        val |= ESDHC_VOLT_SEL;
                        sdhci_writel(host, val, ESDHC_PROCTL);
                }
                return 0;
        default:
                return 0;
        }
}

static struct soc_device_attribute soc_tuning_erratum_type1[] = {
        { .family = "QorIQ T1023", },
        { .family = "QorIQ T1040", },
        { .family = "QorIQ T2080", },
        { .family = "QorIQ LS1021A", },
        { },
};

static struct soc_device_attribute soc_tuning_erratum_type2[] = {
        { .family = "QorIQ LS1012A", },
        { .family = "QorIQ LS1043A", },
        { .family = "QorIQ LS1046A", },
        { .family = "QorIQ LS1080A", },
        { .family = "QorIQ LS2080A", },
        { .family = "QorIQ LA1575A", },
        { },
};

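/*
 * Enable or disable the tuning block (TBCTL[TB_EN]) with the SD clock
 * gated and the asynchronous FIFO flushed, then re-enable the clock.
 */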
static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable)
{
        u32 val;

        esdhc_clock_enable(host, false);
        esdhc_flush_async_fifo(host);

        val = sdhci_readl(host, ESDHC_TBCTL);
        if (enable)
                val |= ESDHC_TB_EN;
        else
                val &= ~ESDHC_TB_EN;
        sdhci_writel(host, val, ESDHC_TBCTL);

        esdhc_clock_enable(host, true);
}

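/*
 * Read the tuning window start/end pointers back from TBSTAT, using the
 * TBCTL write/rewrite and double-read sequence below.
 */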
static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start,
                                    u8 *window_end)
{
        u32 val;

        /* Write TBCTL[11:8]=4'h8 */
        val = sdhci_readl(host, ESDHC_TBCTL);
        val &= ~(0xf << 8);
        val |= 8 << 8;
        sdhci_writel(host, val, ESDHC_TBCTL);

        mdelay(1);

        /* Read TBCTL[31:0] register and rewrite again */
        val = sdhci_readl(host, ESDHC_TBCTL);
        sdhci_writel(host, val, ESDHC_TBCTL);

        mdelay(1);

        /* Read the TBSTAT[31:0] register twice */
        val = sdhci_readl(host, ESDHC_TBSTAT);
        val = sdhci_readl(host, ESDHC_TBSTAT);

        *window_end = val & 0xff;
        *window_start = (val >> 8) & 0xff;
}

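/*
 * Pick the tuning window for software tuning: use fixed multiples of the
 * divider ratio on type-1 erratum platforms, otherwise derive the window
 * from the hardware tuning window pointers after a data reset.
 */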
static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start,
                                    u8 *window_end)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u8 start_ptr, end_ptr;

        if (esdhc->quirk_tuning_erratum_type1) {
                *window_start = 5 * esdhc->div_ratio;
                *window_end = 3 * esdhc->div_ratio;
                return;
        }

        esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr);

        /* Reset data lines by setting ESDHCCTL[RSTD] */
        sdhci_reset(host, SDHCI_RESET_DATA);
        /* Write 32'hFFFF_FFFF to IRQSTAT register */
        sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS);

        /*
         * If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2
         * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2,
         * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio
         * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio.
         */

        if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) {
                *window_start = 8 * esdhc->div_ratio;
                *window_end = 4 * esdhc->div_ratio;
        } else {
                *window_start = 5 * esdhc->div_ratio;
                *window_end = 3 * esdhc->div_ratio;
        }
}

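/*
 * Run software tuning: program the tuning window pointers into TBPTR,
 * select software tuning mode in TBCTL, and re-run the standard SDHCI
 * tuning procedure with in_sw_tuning set so that the register write path
 * applies the SMPCLKSEL sequencing.
 */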
static int esdhc_execute_sw_tuning(struct mmc_host *mmc, u32 opcode,
                                   u8 window_start, u8 window_end)
{
        struct sdhci_host *host = mmc_priv(mmc);
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
        u32 val;
        int ret;

        /* Program TBPTR[TB_WNDW_END_PTR] and TBPTR[TB_WNDW_START_PTR] */
        val = ((u32)window_start << ESDHC_WNDW_STRT_PTR_SHIFT) &
              ESDHC_WNDW_STRT_PTR_MASK;
        val |= window_end & ESDHC_WNDW_END_PTR_MASK;
        sdhci_writel(host, val, ESDHC_TBPTR);

        /* Program the software tuning mode by setting TBCTL[TB_MODE]=2'h3 */
        val = sdhci_readl(host, ESDHC_TBCTL);
        val &= ~ESDHC_TB_MODE_MASK;
        val |= ESDHC_TB_MODE_SW;
        sdhci_writel(host, val, ESDHC_TBCTL);

        esdhc->in_sw_tuning = true;
        ret = sdhci_execute_tuning(mmc, opcode);
        esdhc->in_sw_tuning = false;
        return ret;
}

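/*
 * Top-level tuning entry point: cap the clock for tuning, enable the
 * tuning block, run hardware tuning, and fall back to software tuning
 * (and then to a reduced clock) on the platforms affected by the tuning
 * errata.
 */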
esdhc_execute_tuning(struct mmc_host * mmc,u32 opcode)1057*4882a593Smuzhiyun static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun struct sdhci_host *host = mmc_priv(mmc);
1060*4882a593Smuzhiyun struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1061*4882a593Smuzhiyun struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
1062*4882a593Smuzhiyun u8 window_start, window_end;
1063*4882a593Smuzhiyun int ret, retries = 1;
1064*4882a593Smuzhiyun bool hs400_tuning;
1065*4882a593Smuzhiyun unsigned int clk;
1066*4882a593Smuzhiyun u32 val;
1067*4882a593Smuzhiyun
1068*4882a593Smuzhiyun /* For tuning mode, the sd clock divisor value
1069*4882a593Smuzhiyun * must be larger than 3 according to reference manual.
1070*4882a593Smuzhiyun */
1071*4882a593Smuzhiyun clk = esdhc->peripheral_clock / 3;
1072*4882a593Smuzhiyun if (host->clock > clk)
1073*4882a593Smuzhiyun esdhc_of_set_clock(host, clk);
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun esdhc_tuning_block_enable(host, true);
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun /*
1078*4882a593Smuzhiyun * The eSDHC controller takes the data timeout value into account
1079*4882a593Smuzhiyun * during tuning. If the SD card is too slow sending the response, the
1080*4882a593Smuzhiyun * timer will expire and a "Buffer Read Ready" interrupt without data
1081*4882a593Smuzhiyun * is triggered. This leads to tuning errors.
1082*4882a593Smuzhiyun *
1083*4882a593Smuzhiyun * Just set the timeout to the maximum value because the core will
1084*4882a593Smuzhiyun * already take care of it in sdhci_send_tuning().
1085*4882a593Smuzhiyun */
1086*4882a593Smuzhiyun sdhci_writeb(host, 0xe, SDHCI_TIMEOUT_CONTROL);
1087*4882a593Smuzhiyun
1088*4882a593Smuzhiyun hs400_tuning = host->flags & SDHCI_HS400_TUNING;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun do {
1091*4882a593Smuzhiyun if (esdhc->quirk_limited_clk_division &&
1092*4882a593Smuzhiyun hs400_tuning)
1093*4882a593Smuzhiyun esdhc_of_set_clock(host, host->clock);
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun /* Do HW tuning */
1096*4882a593Smuzhiyun val = sdhci_readl(host, ESDHC_TBCTL);
1097*4882a593Smuzhiyun val &= ~ESDHC_TB_MODE_MASK;
1098*4882a593Smuzhiyun val |= ESDHC_TB_MODE_3;
1099*4882a593Smuzhiyun sdhci_writel(host, val, ESDHC_TBCTL);
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun ret = sdhci_execute_tuning(mmc, opcode);
1102*4882a593Smuzhiyun if (ret)
1103*4882a593Smuzhiyun break;
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun /* For type2 affected platforms of the tuning erratum,
1106*4882a593Smuzhiyun * tuning may succeed although eSDHC might not have
1107*4882a593Smuzhiyun * tuned properly. Need to check tuning window.
1108*4882a593Smuzhiyun */
1109*4882a593Smuzhiyun if (esdhc->quirk_tuning_erratum_type2 &&
1110*4882a593Smuzhiyun !host->tuning_err) {
1111*4882a593Smuzhiyun esdhc_tuning_window_ptr(host, &window_start,
1112*4882a593Smuzhiyun &window_end);
1113*4882a593Smuzhiyun if (abs(window_start - window_end) >
1114*4882a593Smuzhiyun (4 * esdhc->div_ratio + 2))
1115*4882a593Smuzhiyun host->tuning_err = -EAGAIN;
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun /* If HW tuning fails and triggers erratum,
1119*4882a593Smuzhiyun * try workaround.
1120*4882a593Smuzhiyun */
		ret = host->tuning_err;
		if (ret == -EAGAIN &&
		    (esdhc->quirk_tuning_erratum_type1 ||
		     esdhc->quirk_tuning_erratum_type2)) {
			/* Recover HS400 tuning flag */
			if (hs400_tuning)
				host->flags |= SDHCI_HS400_TUNING;
			pr_info("%s: Hold on to use fixed sampling clock. Try SW tuning!\n",
				mmc_hostname(mmc));
			/* Do SW tuning */
			esdhc_prepare_sw_tuning(host, &window_start,
						&window_end);
			ret = esdhc_execute_sw_tuning(mmc, opcode,
						      window_start,
						      window_end);
			if (ret)
				break;

			/* Retry both HW/SW tuning with reduced clock. */
			ret = host->tuning_err;
			if (ret == -EAGAIN && retries) {
				/* Recover HS400 tuning flag */
				if (hs400_tuning)
					host->flags |= SDHCI_HS400_TUNING;

				clk = host->max_clk / (esdhc->div_ratio + 1);
				esdhc_of_set_clock(host, clk);
				pr_info("%s: Hold on to use fixed sampling clock. Try tuning with reduced clock!\n",
					mmc_hostname(mmc));
			} else {
				break;
			}
		} else {
			break;
		}
	} while (retries--);

	if (ret) {
		esdhc_tuning_block_enable(host, false);
	} else if (hs400_tuning) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val |= ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);
	}

	return ret;
}

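/*
 * Before programming a new speed mode, clear any HS400-specific controller
 * state (flow control, CMD clock control, the HS400 mode and window-adjust
 * bits, the DLL configuration) and disable the tuning block; then either
 * re-enable the tuning block for HS400 or fall through to the generic SDHCI
 * UHS signaling setup.
 */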
static void esdhc_set_uhs_signaling(struct sdhci_host *host,
				    unsigned int timing)
{
	u32 val;

	/*
	 * There are HS400-specific register settings in the controller.
	 * If the controller is currently in HS400 mode, clear all of
	 * them to exit HS400 before programming any other speed mode.
	 */
	val = sdhci_readl(host, ESDHC_TBCTL);
	if (val & ESDHC_HS400_MODE) {
		val = sdhci_readl(host, ESDHC_SDTIMNGCTL);
		val &= ~ESDHC_FLW_CTL_BG;
		sdhci_writel(host, val, ESDHC_SDTIMNGCTL);

		val = sdhci_readl(host, ESDHC_SDCLKCTL);
		val &= ~ESDHC_CMD_CLK_CTL;
		sdhci_writel(host, val, ESDHC_SDCLKCTL);

		esdhc_clock_enable(host, false);
		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_MODE;
		sdhci_writel(host, val, ESDHC_TBCTL);
		esdhc_clock_enable(host, true);

		val = sdhci_readl(host, ESDHC_DLLCFG0);
		val &= ~(ESDHC_DLL_ENABLE | ESDHC_DLL_FREQ_SEL);
		sdhci_writel(host, val, ESDHC_DLLCFG0);

		val = sdhci_readl(host, ESDHC_TBCTL);
		val &= ~ESDHC_HS400_WNDW_ADJUST;
		sdhci_writel(host, val, ESDHC_TBCTL);

		esdhc_tuning_block_enable(host, false);
	}

	if (timing == MMC_TIMING_MMC_HS400)
		esdhc_tuning_block_enable(host, true);
	else
		sdhci_set_uhs_signaling(host, timing);
}

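/*
 * quirk_trans_complete_erratum (set for P2020): a data-end interrupt can be
 * raised for a multiple-block write while the block count is still non-zero.
 * Ack and drop the premature SDHCI_INT_DATA_END so the core only handles the
 * final, genuine transfer-complete interrupt.
 */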
static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
	u32 command;

	if (esdhc->quirk_trans_complete_erratum) {
		command = SDHCI_GET_CMD(sdhci_readw(host,
					SDHCI_COMMAND));
		if (command == MMC_WRITE_MULTIPLE_BLOCK &&
		    sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
		    intmask & SDHCI_INT_DATA_END) {
			intmask &= ~SDHCI_INT_DATA_END;
			sdhci_writel(host, SDHCI_INT_DATA_END,
				     SDHCI_INT_STATUS);
		}
	}
	return intmask;
}

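/*
 * System PM: SDHCI_HOST_CONTROL is saved at suspend and written back after
 * sdhci_resume_host(), with DMA re-enabled; a retune is requested on suspend
 * unless the controller uses tuning mode 3.
 */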
#ifdef CONFIG_PM_SLEEP
static u32 esdhc_proctl;
static int esdhc_of_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	return sdhci_suspend_host(host);
}

static int esdhc_of_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret = sdhci_resume_host(host);

	if (ret == 0) {
		/* Isn't this already done by sdhci_resume_host() ? --rmk */
		esdhc_of_enable_dma(host);
		sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
	}
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(esdhc_of_dev_pm_ops,
			 esdhc_of_suspend,
			 esdhc_of_resume);

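/*
 * Two sets of sdhci_ops are provided; they differ only in the register
 * accessors, since the eSDHC register block may be wired up big-endian or
 * little-endian depending on the SoC. Probe selects one of them based on
 * the "little-endian" device tree property.
 */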
static const struct sdhci_ops sdhci_esdhc_be_ops = {
	.read_l = esdhc_be_readl,
	.read_w = esdhc_be_readw,
	.read_b = esdhc_be_readb,
	.write_l = esdhc_be_writel,
	.write_w = esdhc_be_writew,
	.write_b = esdhc_be_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

static const struct sdhci_ops sdhci_esdhc_le_ops = {
	.read_l = esdhc_le_readl,
	.read_w = esdhc_le_readw,
	.read_b = esdhc_le_readb,
	.write_l = esdhc_le_writel,
	.write_w = esdhc_le_writew,
	.write_b = esdhc_le_writeb,
	.set_clock = esdhc_of_set_clock,
	.enable_dma = esdhc_of_enable_dma,
	.get_max_clock = esdhc_of_get_max_clock,
	.get_min_clock = esdhc_of_get_min_clock,
	.adma_workaround = esdhc_of_adma_workaround,
	.set_bus_width = esdhc_pltfm_set_bus_width,
	.reset = esdhc_reset,
	.set_uhs_signaling = esdhc_set_uhs_signaling,
	.irq = esdhc_irq,
};

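/*
 * Default platform data for both endiannesses. Broken card detection is
 * only assumed on PowerPC builds; probe clears that quirk again for the
 * compatibles known to have working card detection.
 */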
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
#ifdef CONFIG_PPC
		  SDHCI_QUIRK_BROKEN_CARD_DETECTION |
#endif
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_be_ops,
};

static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
	.quirks = ESDHC_DEFAULT_QUIRKS |
		  SDHCI_QUIRK_NO_CARD_NO_RESET |
		  SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
	.ops = &sdhci_esdhc_le_ops,
};

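/*
 * SoC-specific quirks that cannot be keyed off the compatible string alone
 * are matched by family/revision through the soc_device framework.
 */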
static struct soc_device_attribute soc_incorrect_hostver[] = {
	{ .family = "QorIQ T4240", .revision = "1.0", },
	{ .family = "QorIQ T4240", .revision = "2.0", },
	{ },
};

static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
	{ .family = "QorIQ LX2160A", .revision = "1.0", },
	{ .family = "QorIQ LX2160A", .revision = "2.0", },
	{ .family = "QorIQ LS1028A", .revision = "1.0", },
	{ },
};

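/*
 * One-time controller setup at probe: read out the vendor/spec version,
 * apply the SoC- and compatible-based quirks, pick up the peripheral clock
 * rate where one is provided, and program the clock-select bit in
 * ESDHC_DMA_SYSCTL with the clocks gated.
 */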
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
{
	const struct of_device_id *match;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	struct device_node *np;
	struct clk *clk;
	u32 val;
	u16 host_ver;

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);

	host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
	esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
			    SDHCI_VENDOR_VER_SHIFT;
	esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
	if (soc_device_match(soc_incorrect_hostver))
		esdhc->quirk_incorrect_hostver = true;
	else
		esdhc->quirk_incorrect_hostver = false;

	if (soc_device_match(soc_fixup_sdhc_clkdivs))
		esdhc->quirk_limited_clk_division = true;
	else
		esdhc->quirk_limited_clk_division = false;

	if (soc_device_match(soc_unreliable_pulse_detection))
		esdhc->quirk_unreliable_pulse_detection = true;
	else
		esdhc->quirk_unreliable_pulse_detection = false;

	match = of_match_node(sdhci_esdhc_of_match, pdev->dev.of_node);
	if (match)
		esdhc->clk_fixup = match->data;
	np = pdev->dev.of_node;

	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		esdhc->quirk_delay_before_data_reset = true;
		esdhc->quirk_trans_complete_erratum = true;
	}

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		/*
		 * esdhc->peripheral_clock is assigned the eSDHC base clock
		 * when the peripheral clock is used. On some platforms the
		 * rate returned by the common clock API is the peripheral
		 * clock, while the eSDHC base clock is half of it.
		 */
1388*4882a593Smuzhiyun if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") ||
1389*4882a593Smuzhiyun of_device_is_compatible(np, "fsl,ls1028a-esdhc") ||
1390*4882a593Smuzhiyun of_device_is_compatible(np, "fsl,ls1088a-esdhc"))
1391*4882a593Smuzhiyun esdhc->peripheral_clock = clk_get_rate(clk) / 2;
1392*4882a593Smuzhiyun else
1393*4882a593Smuzhiyun esdhc->peripheral_clock = clk_get_rate(clk);
1394*4882a593Smuzhiyun
1395*4882a593Smuzhiyun clk_put(clk);
1396*4882a593Smuzhiyun }
1397*4882a593Smuzhiyun
1398*4882a593Smuzhiyun esdhc_clock_enable(host, false);
1399*4882a593Smuzhiyun val = sdhci_readl(host, ESDHC_DMA_SYSCTL);
	/*
	 * This bit cannot be reset by SDHCI_RESET_ALL, so explicitly
	 * initialize it to 1 or 0 once, to override whatever value the
	 * bootloader may have configured.
	 */
	if (esdhc->peripheral_clock)
		val |= ESDHC_PERIPHERAL_CLK_SEL;
	else
		val &= ~ESDHC_PERIPHERAL_CLK_SEL;
	sdhci_writel(host, val, ESDHC_DMA_SYSCTL);
	esdhc_clock_enable(host, true);
}

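/*
 * Called by the MMC core before the switch to DDR timing on the way to
 * HS400; the tuning block is disabled at that point.
 */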
static int esdhc_hs400_prepare_ddr(struct mmc_host *mmc)
{
	esdhc_tuning_block_enable(mmc_priv(mmc), false);
	return 0;
}

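/*
 * Probe: select big- or little-endian platform data from the
 * "little-endian" property, hook up the eSDHC-specific mmc_host_ops,
 * apply per-compatible and per-SoC quirks, then register the host.
 */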
static int sdhci_esdhc_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct device_node *np;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_esdhc *esdhc;
	int ret;

	np = pdev->dev.of_node;

	if (of_property_read_bool(np, "little-endian"))
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata,
					sizeof(struct sdhci_esdhc));
	else
		host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata,
					sizeof(struct sdhci_esdhc));

	if (IS_ERR(host))
		return PTR_ERR(host);

	host->mmc_host_ops.start_signal_voltage_switch =
		esdhc_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = esdhc_execute_tuning;
	host->mmc_host_ops.hs400_prepare_ddr = esdhc_hs400_prepare_ddr;
	host->tuning_delay = 1;

	esdhc_init(pdev, host);

	sdhci_get_of_property(pdev);

	pltfm_host = sdhci_priv(host);
	esdhc = sdhci_pltfm_priv(pltfm_host);
	if (soc_device_match(soc_tuning_erratum_type1))
		esdhc->quirk_tuning_erratum_type1 = true;
	else
		esdhc->quirk_tuning_erratum_type1 = false;

	if (soc_device_match(soc_tuning_erratum_type2))
		esdhc->quirk_tuning_erratum_type2 = true;
	else
		esdhc->quirk_tuning_erratum_type2 = false;

	if (esdhc->vendor_ver == VENDOR_V_22)
		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;

	if (esdhc->vendor_ver > VENDOR_V_22)
		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;

	if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
		host->quirks |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
	}

	if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
	    of_device_is_compatible(np, "fsl,p5020-esdhc") ||
	    of_device_is_compatible(np, "fsl,p4080-esdhc") ||
	    of_device_is_compatible(np, "fsl,p1020-esdhc") ||
	    of_device_is_compatible(np, "fsl,t1040-esdhc"))
		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	esdhc->quirk_ignore_data_inhibit = false;
	if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
		/*
		 * Freescale messed up with P2020 as it has a non-standard
		 * host control register
		 */
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
		esdhc->quirk_ignore_data_inhibit = true;
	}

	/* call to generic mmc_of_parse to support additional capabilities */
	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto err;

	mmc_of_parse_voltage(np, &host->ocr_mask);

	ret = sdhci_add_host(host);
	if (ret)
		goto err;

	return 0;
err:
	sdhci_pltfm_free(pdev);
	return ret;
}

static struct platform_driver sdhci_esdhc_driver = {
	.driver = {
		.name = "sdhci-esdhc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = sdhci_esdhc_of_match,
		.pm = &esdhc_of_dev_pm_ops,
	},
	.probe = sdhci_esdhc_probe,
	.remove = sdhci_pltfm_unregister,
};

module_platform_driver(sdhci_esdhc_driver);

MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
	      "Anton Vorontsov <avorontsov@ru.mvista.com>");
MODULE_LICENSE("GPL v2");