/* xref: /OK3568_Linux_fs/kernel/drivers/mmc/host/sdhci-tegra.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Google, Inc.
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/ktime.h>

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"

/* Tegra SDHOST controller vendor register definitions */
#define SDHCI_TEGRA_VENDOR_CLOCK_CTRL			0x100
#define SDHCI_CLOCK_CTRL_TAP_MASK			0x00ff0000
#define SDHCI_CLOCK_CTRL_TAP_SHIFT			16
#define SDHCI_CLOCK_CTRL_TRIM_MASK			0x1f000000
#define SDHCI_CLOCK_CTRL_TRIM_SHIFT			24
#define SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE		BIT(5)
#define SDHCI_CLOCK_CTRL_PADPIPE_CLKEN_OVERRIDE		BIT(3)
#define SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE	BIT(2)

#define SDHCI_TEGRA_VENDOR_SYS_SW_CTRL			0x104
#define SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE		BIT(31)

#define SDHCI_TEGRA_VENDOR_CAP_OVERRIDES		0x10c
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK		0x00003f00
#define SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT	8

#define SDHCI_TEGRA_VENDOR_MISC_CTRL			0x120
#define SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT		BIT(0)
#define SDHCI_MISC_CTRL_ENABLE_SDR104			0x8
#define SDHCI_MISC_CTRL_ENABLE_SDR50			0x10
#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300		0x20
#define SDHCI_MISC_CTRL_ENABLE_DDR50			0x200

#define SDHCI_TEGRA_VENDOR_DLLCAL_CFG			0x1b0
#define SDHCI_TEGRA_DLLCAL_CALIBRATE			BIT(31)

#define SDHCI_TEGRA_VENDOR_DLLCAL_STA			0x1bc
#define SDHCI_TEGRA_DLLCAL_STA_ACTIVE			BIT(31)

#define SDHCI_VNDR_TUN_CTRL0_0				0x1c0
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP			0x20000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK		0x03fc0000
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT	18
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK			0x00001fc0
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT		6
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK		0x000e000
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT		13
#define TRIES_128					2
#define TRIES_256					4
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK		0x7

#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0			0x1c4
#define SDHCI_TEGRA_VNDR_TUN_STATUS0			0x1C8
#define SDHCI_TEGRA_VNDR_TUN_STATUS1			0x1CC
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK		0xFF
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT	0x8
#define TUNING_WORD_BIT_SIZE				32

#define SDHCI_TEGRA_AUTO_CAL_CONFIG			0x1e4
#define SDHCI_AUTO_CAL_START				BIT(31)
#define SDHCI_AUTO_CAL_ENABLE				BIT(29)
#define SDHCI_AUTO_CAL_PDPU_OFFSET_MASK			0x0000ffff

#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL			0x1e0
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK	0x0000000f
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL	0x7
#define SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD	BIT(31)
#define SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK		0x07FFF000

#define SDHCI_TEGRA_AUTO_CAL_STATUS			0x1ec
#define SDHCI_TEGRA_AUTO_CAL_ACTIVE			BIT(31)

#define NVQUIRK_FORCE_SDHCI_SPEC_200			BIT(0)
#define NVQUIRK_ENABLE_BLOCK_GAP_DET			BIT(1)
#define NVQUIRK_ENABLE_SDHCI_SPEC_300			BIT(2)
#define NVQUIRK_ENABLE_SDR50				BIT(3)
#define NVQUIRK_ENABLE_SDR104				BIT(4)
#define NVQUIRK_ENABLE_DDR50				BIT(5)
/*
 * HAS_PADCALIB NVQUIRK is for SoCs supporting auto calibration of pad
 * drive strength.
 */
#define NVQUIRK_HAS_PADCALIB				BIT(6)
/*
 * NEEDS_PAD_CONTROL NVQUIRK is for SoCs having separate 3V3 and 1V8 pads.
 * 3V3/1V8 pad selection happens through pinctrl state selection depending
 * on the signaling mode.
 */
#define NVQUIRK_NEEDS_PAD_CONTROL			BIT(7)
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP			BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING		BIT(9)

/*
 * NVQUIRK_HAS_TMCLK is for SoCs having a separate timeout clock for the
 * Tegra SDMMC hardware data timeout.
 */
#define NVQUIRK_HAS_TMCLK				BIT(10)

/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR			0xF000

#define SDHCI_TEGRA_CQE_TRNS_MODE	(SDHCI_TRNS_MULTI | \
					 SDHCI_TRNS_BLK_CNT_EN | \
					 SDHCI_TRNS_DMA)

struct sdhci_tegra_soc_data {
	const struct sdhci_pltfm_data *pdata;
	u64 dma_mask;
	u32 nvquirks;
	u8 min_tap_delay;
	u8 max_tap_delay;
};

/* Magic pull up and pull down pad calibration offsets */
struct sdhci_tegra_autocal_offsets {
	u32 pull_up_3v3;
	u32 pull_down_3v3;
	u32 pull_up_3v3_timeout;
	u32 pull_down_3v3_timeout;
	u32 pull_up_1v8;
	u32 pull_down_1v8;
	u32 pull_up_1v8_timeout;
	u32 pull_down_1v8_timeout;
	u32 pull_up_sdr104;
	u32 pull_down_sdr104;
	u32 pull_up_hs400;
	u32 pull_down_hs400;
};

struct sdhci_tegra {
	const struct sdhci_tegra_soc_data *soc_data;
	struct gpio_desc *power_gpio;
	struct clk *tmclk;
	bool ddr_signaling;
	bool pad_calib_required;
	bool pad_control_available;

	struct reset_control *rst;
	struct pinctrl *pinctrl_sdmmc;
	struct pinctrl_state *pinctrl_state_3v3;
	struct pinctrl_state *pinctrl_state_1v8;
	struct pinctrl_state *pinctrl_state_3v3_drv;
	struct pinctrl_state *pinctrl_state_1v8_drv;

	struct sdhci_tegra_autocal_offsets autocal_offsets;
	ktime_t last_calib;

	u32 default_tap;
	u32 default_trim;
	u32 dqs_trim;
	bool enable_hwcq;
	unsigned long curr_clk_rate;
	u8 tuned_tap_delay;
};

static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
			(reg == SDHCI_HOST_VERSION))) {
		/* Erratum: Version register is invalid in HW. */
		return SDHCI_SPEC_200;
	}

	return readw(host->ioaddr + reg);
}

static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	switch (reg) {
	case SDHCI_TRANSFER_MODE:
		/*
		 * Postpone this write, we must do it together with a
		 * command write that is down below.
		 */
		pltfm_host->xfer_mode_shadow = val;
		return;
	case SDHCI_COMMAND:
		writel((val << 16) | pltfm_host->xfer_mode_shadow,
			host->ioaddr + SDHCI_TRANSFER_MODE);
		return;
	}

	writew(val, host->ioaddr + reg);
}

static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	/* Seems like we're getting spurious timeout and crc errors, so
	 * disable signalling of them. In case of real errors software
	 * timers should take care of eventually detecting them.
	 */
	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);

	writel(val, host->ioaddr + reg);

	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
			(reg == SDHCI_INT_ENABLE))) {
		/* Erratum: Must enable block gap interrupt detection */
		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
		if (val & SDHCI_INT_CARD_INT)
			gap_ctrl |= 0x8;
		else
			gap_ctrl &= ~0x8;
		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
	}
}

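/*
 * Gate or ungate the card clock and return whether it was enabled on entry,
 * so that callers can restore the previous state afterwards.
 */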
static bool tegra_sdhci_configure_card_clk(struct sdhci_host *host, bool enable)
{
	bool status;
	u32 reg;

	reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	status = !!(reg & SDHCI_CLOCK_CARD_EN);

	if (status == enable)
		return status;

	if (enable)
		reg |= SDHCI_CLOCK_CARD_EN;
	else
		reg &= ~SDHCI_CLOCK_CARD_EN;

	sdhci_writew(host, reg, SDHCI_CLOCK_CONTROL);

	return status;
}

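/*
 * Tegra210 issues tuning commands with the card clock gated: gate the clock
 * around the command register write, then reset the CMD and DATA lines and
 * restore the previous clock state.
 */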
static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
{
	bool is_tuning_cmd = 0;
	bool clk_enabled;
	u8 cmd;

	if (reg == SDHCI_COMMAND) {
		cmd = SDHCI_GET_CMD(val);
		is_tuning_cmd = cmd == MMC_SEND_TUNING_BLOCK ||
				cmd == MMC_SEND_TUNING_BLOCK_HS200;
	}

	if (is_tuning_cmd)
		clk_enabled = tegra_sdhci_configure_card_clk(host, 0);

	writew(val, host->ioaddr + reg);

	if (is_tuning_cmd) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, clk_enabled);
	}
}

static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host)
{
	/*
	 * Write-enable shall be assumed if GPIO is missing in a board's
	 * device-tree because SDHCI's WRITE_PROTECT bit doesn't work on
	 * Tegra.
	 */
	return mmc_gpio_get_ro(host->mmc);
}

static bool tegra_sdhci_is_pad_and_regulator_valid(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int has_1v8, has_3v3;

	/*
	 * The SoCs which have NVQUIRK_NEEDS_PAD_CONTROL require software pad
	 * voltage configuration in order to perform voltage switching. This
	 * means that valid pinctrl info is required on SDHCI instances capable
	 * of performing voltage switching. Whether or not an SDHCI instance is
	 * capable of voltage switching is determined based on the regulator.
	 */

	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return true;

	if (IS_ERR(host->mmc->supply.vqmmc))
		return false;

	has_1v8 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 1700000, 1950000);

	has_3v3 = regulator_is_supported_voltage(host->mmc->supply.vqmmc,
						 2700000, 3600000);

	if (has_1v8 == 1 && has_3v3 == 1)
		return tegra_host->pad_control_available;

	/* Fixed voltage, no pad control required. */
	return true;
}

static void tegra_sdhci_set_tap(struct sdhci_host *host, unsigned int tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	bool card_clk_enabled = false;
	u32 reg;

	/*
	 * Touching the tap values is a bit tricky on some SoC generations.
	 * The quirk enables a workaround for a glitch that sometimes occurs if
	 * the tap values are changed.
	 */

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP)
		card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	reg &= ~SDHCI_CLOCK_CTRL_TAP_MASK;
	reg |= tap << SDHCI_CLOCK_CTRL_TAP_SHIFT;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_DIS_CARD_CLK_CONFIG_TAP &&
	    card_clk_enabled) {
		udelay(1);
		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
		tegra_sdhci_configure_card_clk(host, card_clk_enabled);
	}
}

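/*
 * On a full controller reset, reprogram the vendor-specific defaults: the
 * default tap and trim values, the UHS mode enables allowed by the SoC
 * quirks, and the pad calibration settings.
 */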
static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 misc_ctrl, clk_ctrl, pad_ctrl;

	sdhci_and_cqhci_reset(host, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	tegra_sdhci_set_tap(host, tegra_host->default_tap);

	misc_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	clk_ctrl = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	misc_ctrl &= ~(SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 |
		       SDHCI_MISC_CTRL_ENABLE_SDR50 |
		       SDHCI_MISC_CTRL_ENABLE_DDR50 |
		       SDHCI_MISC_CTRL_ENABLE_SDR104);

	clk_ctrl &= ~(SDHCI_CLOCK_CTRL_TRIM_MASK |
		      SDHCI_CLOCK_CTRL_SPI_MODE_CLKEN_OVERRIDE);

	if (tegra_sdhci_is_pad_and_regulator_valid(host)) {
		/* Erratum: Enable SDHCI spec v3.00 support */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
		/* Advertise UHS modes as supported by host */
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104)
			misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104;
		if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50)
			clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE;
	}

	clk_ctrl |= tegra_host->default_trim << SDHCI_CLOCK_CTRL_TRIM_SHIFT;

	sdhci_writel(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
	sdhci_writel(host, clk_ctrl, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);

	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB) {
		pad_ctrl = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		pad_ctrl &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_MASK;
		pad_ctrl |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_VREF_SEL_VAL;
		sdhci_writel(host, pad_ctrl, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

		tegra_host->pad_calib_required = true;
	}

	tegra_host->ddr_signaling = false;
}

static void tegra_sdhci_configure_cal_pad(struct sdhci_host *host, bool enable)
{
	u32 val;

	/*
	 * Enable or disable the additional I/O pad used by the drive strength
	 * calibration process.
	 */
	val = sdhci_readl(host, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		val |= SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;
	else
		val &= ~SDHCI_TEGRA_SDMEM_COMP_PADCTRL_E_INPUT_E_PWRD;

	sdhci_writel(host, val, SDHCI_TEGRA_SDMEM_COMP_PADCTRL);

	if (enable)
		usleep_range(1, 2);
}

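/*
 * Program the combined pull-down/pull-up auto-calibration offsets (pull-down
 * in bits 15:8, pull-up in bits 7:0 of @pdpu) into AUTO_CAL_CONFIG.
 */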
static void tegra_sdhci_set_pad_autocal_offset(struct sdhci_host *host,
					       u16 pdpu)
{
	u32 reg;

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg &= ~SDHCI_AUTO_CAL_PDPU_OFFSET_MASK;
	reg |= pdpu;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);
}

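/*
 * Configure the SDMMC pads for the given signaling voltage. With
 * @state_drvupdn set, select the 1V8 or 3V3 pad voltage via pinctrl;
 * otherwise apply fixed drive strengths, preferring a dedicated pinctrl
 * drive state and falling back to the fail-safe timeout offsets.
 */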
static int tegra_sdhci_set_padctrl(struct sdhci_host *host, int voltage,
				   bool state_drvupdn)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *offsets =
						&tegra_host->autocal_offsets;
	struct pinctrl_state *pinctrl_drvupdn = NULL;
	int ret = 0;
	u8 drvup = 0, drvdn = 0;
	u32 reg;

	if (!state_drvupdn) {
		/* PADS Drive Strength */
		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			if (tegra_host->pinctrl_state_1v8_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_1v8_drv;
			} else {
				drvup = offsets->pull_up_1v8_timeout;
				drvdn = offsets->pull_down_1v8_timeout;
			}
		} else {
			if (tegra_host->pinctrl_state_3v3_drv) {
				pinctrl_drvupdn =
					tegra_host->pinctrl_state_3v3_drv;
			} else {
				drvup = offsets->pull_up_3v3_timeout;
				drvdn = offsets->pull_down_3v3_timeout;
			}
		}

		if (pinctrl_drvupdn != NULL) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
							pinctrl_drvupdn);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"failed pads drvupdn, ret: %d\n", ret);
		} else if ((drvup) || (drvdn)) {
			reg = sdhci_readl(host,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
			reg &= ~SDHCI_COMP_PADCTRL_DRVUPDN_OFFSET_MASK;
			reg |= (drvup << 20) | (drvdn << 12);
			sdhci_writel(host, reg,
					SDHCI_TEGRA_SDMEM_COMP_PADCTRL);
		}

	} else {
		/* Dual Voltage PADS Voltage selection */
		if (!tegra_host->pad_control_available)
			return 0;

		if (voltage == MMC_SIGNAL_VOLTAGE_180) {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_1v8);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 1.8V failed, ret: %d\n", ret);
		} else {
			ret = pinctrl_select_state(tegra_host->pinctrl_sdmmc,
						tegra_host->pinctrl_state_3v3);
			if (ret < 0)
				dev_err(mmc_dev(host->mmc),
					"setting 3.3V failed, ret: %d\n", ret);
		}
	}

	return ret;
}

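/*
 * Run the pad drive strength auto-calibration sequence for the current
 * timing and signaling voltage, falling back to fixed drive strengths if
 * calibration does not complete within 10 ms.
 */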
static void tegra_sdhci_pad_autocalib(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets offsets =
			tegra_host->autocal_offsets;
	struct mmc_ios *ios = &host->mmc->ios;
	bool card_clk_enabled;
	u16 pdpu;
	u32 reg;
	int ret;

	switch (ios->timing) {
	case MMC_TIMING_UHS_SDR104:
		pdpu = offsets.pull_down_sdr104 << 8 | offsets.pull_up_sdr104;
		break;
	case MMC_TIMING_MMC_HS400:
		pdpu = offsets.pull_down_hs400 << 8 | offsets.pull_up_hs400;
		break;
	default:
		if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pdpu = offsets.pull_down_1v8 << 8 | offsets.pull_up_1v8;
		else
			pdpu = offsets.pull_down_3v3 << 8 | offsets.pull_up_3v3;
	}

	/* Set initial offset before auto-calibration */
	tegra_sdhci_set_pad_autocal_offset(host, pdpu);

	card_clk_enabled = tegra_sdhci_configure_card_clk(host, false);

	tegra_sdhci_configure_cal_pad(host, true);

	reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
	reg |= SDHCI_AUTO_CAL_ENABLE | SDHCI_AUTO_CAL_START;
	sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

	usleep_range(1, 2);
	/* 10 ms timeout */
	ret = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_AUTO_CAL_STATUS,
				 reg, !(reg & SDHCI_TEGRA_AUTO_CAL_ACTIVE),
				 1000, 10000);

	tegra_sdhci_configure_cal_pad(host, false);

	tegra_sdhci_configure_card_clk(host, card_clk_enabled);

	if (ret) {
		dev_err(mmc_dev(host->mmc), "Pad autocal timed out\n");

		/* Disable automatic cal and use fixed Drive Strengths */
		reg = sdhci_readl(host, SDHCI_TEGRA_AUTO_CAL_CONFIG);
		reg &= ~SDHCI_AUTO_CAL_ENABLE;
		sdhci_writel(host, reg, SDHCI_TEGRA_AUTO_CAL_CONFIG);

		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, false);
		if (ret < 0)
			dev_err(mmc_dev(host->mmc),
				"Setting drive strengths failed: %d\n", ret);
	}
}

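/*
 * Read the per-voltage and per-mode pad auto-calibration offsets from the
 * device tree, falling back to the 1V8 values (or zero) when a property is
 * missing.
 */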
static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	struct sdhci_tegra_autocal_offsets *autocal =
			&tegra_host->autocal_offsets;
	int err;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-3v3",
			&autocal->pull_up_3v3);
	if (err)
		autocal->pull_up_3v3 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-3v3",
			&autocal->pull_down_3v3);
	if (err)
		autocal->pull_down_3v3 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-1v8",
			&autocal->pull_up_1v8);
	if (err)
		autocal->pull_up_1v8 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-1v8",
			&autocal->pull_down_1v8);
	if (err)
		autocal->pull_down_1v8 = 0;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-sdr104",
			&autocal->pull_up_sdr104);
	if (err)
		autocal->pull_up_sdr104 = autocal->pull_up_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-sdr104",
			&autocal->pull_down_sdr104);
	if (err)
		autocal->pull_down_sdr104 = autocal->pull_down_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-hs400",
			&autocal->pull_up_hs400);
	if (err)
		autocal->pull_up_hs400 = autocal->pull_up_1v8;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-hs400",
			&autocal->pull_down_hs400);
	if (err)
		autocal->pull_down_hs400 = autocal->pull_down_1v8;

	/*
	 * Different fail-safe drive strength values based on the signaling
	 * voltage are applicable for SoCs supporting 3V3 and 1V8 pad controls.
	 * So, avoid reading below device tree properties for SoCs that don't
	 * have NVQUIRK_NEEDS_PAD_CONTROL.
	 */
	if (!(tegra_host->soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL))
		return;

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
			&autocal->pull_up_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_3v3_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
			&autocal->pull_down_3v3_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_3v3) &&
			(tegra_host->pinctrl_state_3v3_drv == NULL))
			pr_warn("%s: Missing autocal timeout 3v3-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_3v3_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
			&autocal->pull_up_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_up_1v8_timeout = 0;
	}

	err = device_property_read_u32(host->mmc->parent,
			"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
			&autocal->pull_down_1v8_timeout);
	if (err) {
		if (!IS_ERR(tegra_host->pinctrl_state_1v8) &&
			(tegra_host->pinctrl_state_1v8_drv == NULL))
			pr_warn("%s: Missing autocal timeout 1v8-pad drvs\n",
				mmc_hostname(host->mmc));
		autocal->pull_down_1v8_timeout = 0;
	}
}

static void tegra_sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	ktime_t since_calib = ktime_sub(ktime_get(), tegra_host->last_calib);

	/* 100 ms calibration interval is specified in the TRM */
	if (ktime_to_ms(since_calib) > 100) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->last_calib = ktime_get();
	}

	sdhci_request(mmc, mrq);
}

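/* Read the default tap, trim and DQS trim values from the device tree. */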
static void tegra_sdhci_parse_tap_and_trim(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	int err;

	err = device_property_read_u32(host->mmc->parent, "nvidia,default-tap",
				       &tegra_host->default_tap);
	if (err)
		tegra_host->default_tap = 0;

	err = device_property_read_u32(host->mmc->parent, "nvidia,default-trim",
				       &tegra_host->default_trim);
	if (err)
		tegra_host->default_trim = 0;

	err = device_property_read_u32(host->mmc->parent, "nvidia,dqs-trim",
				       &tegra_host->dqs_trim);
	if (err)
		tegra_host->dqs_trim = 0x11;
}

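/* Top-level DT parsing: CQE support, pad calibration offsets and tap/trim. */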
static void tegra_sdhci_parse_dt(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);

	if (device_property_read_bool(host->mmc->parent, "supports-cqe"))
		tegra_host->enable_hwcq = true;
	else
		tegra_host->enable_hwcq = false;

	tegra_sdhci_parse_pad_autocal_dt(host);
	tegra_sdhci_parse_tap_and_trim(host);
}

static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long host_clk;

	if (!clock)
		return sdhci_set_clock(host, clock);

	/*
	 * In DDR50/52 modes the Tegra SDHCI controllers require the SDHCI
	 * divider to be configured to divide the host clock by two. The SDHCI
	 * clock divider is calculated as part of sdhci_set_clock() by
	 * sdhci_calc_clk(). The divider is calculated from host->max_clk and
	 * the requested clock rate.
	 *
	 * By setting the host->max_clk to clock * 2 the divider calculation
	 * will always result in the correct value for DDR50/52 modes,
	 * regardless of clock rate rounding, which may happen if the value
	 * from clk_get_rate() is used.
	 */
	host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
	clk_set_rate(pltfm_host->clk, host_clk);
	tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
	if (tegra_host->ddr_signaling)
		host->max_clk = host_clk;
	else
		host->max_clk = clk_get_rate(pltfm_host->clk);

	sdhci_set_clock(host, clock);

	if (tegra_host->pad_calib_required) {
		tegra_sdhci_pad_autocalib(host);
		tegra_host->pad_calib_required = false;
	}
}

static void tegra_sdhci_hs400_enhanced_strobe(struct mmc_host *mmc,
					      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);

	if (ios->enhanced_strobe) {
		val |= SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
		/*
		 * When CMD13 is sent from mmc_select_hs400es() after
		 * switching to HS400ES mode, the bus is operating at
		 * either MMC_HIGH_26_MAX_DTR or MMC_HIGH_52_MAX_DTR.
		 * To meet Tegra SDHCI requirement at HS400ES mode, force SDHCI
		 * interface clock to MMC_HS200_MAX_DTR (200 MHz) so that host
		 * controller CAR clock and the interface clock are rate matched.
		 */
		tegra_sdhci_set_clock(host, MMC_HS200_MAX_DTR);
	} else {
		val &= ~SDHCI_TEGRA_SYS_SW_CTRL_ENHANCED_STROBE;
	}

	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_SYS_SW_CTRL);
}

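/* The maximum host clock is whatever the parent clock can be rounded up to. */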
static unsigned int tegra_sdhci_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);

	return clk_round_rate(pltfm_host->clk, UINT_MAX);
}

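/* Program the HS400 DQS trim value into the capabilities override register. */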
static void tegra_sdhci_set_dqs_trim(struct sdhci_host *host, u8 trim)
{
	u32 val;

	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
	val &= ~SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_MASK;
	val |= trim << SDHCI_TEGRA_CAP_OVERRIDES_DQS_TRIM_SHIFT;
	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_CAP_OVERRIDES);
}

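/* Trigger HS400 DLL calibration and poll for completion (5 ms timeout). */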
static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
{
	u32 reg;
	int err;

	reg = sdhci_readl(host, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);
	reg |= SDHCI_TEGRA_DLLCAL_CALIBRATE;
	sdhci_writel(host, reg, SDHCI_TEGRA_VENDOR_DLLCAL_CFG);

	/* 1 ms sleep, 5 ms timeout */
	err = readl_poll_timeout(host->ioaddr + SDHCI_TEGRA_VENDOR_DLLCAL_STA,
				 reg, !(reg & SDHCI_TEGRA_DLLCAL_STA_ACTIVE),
				 1000, 5000);
	if (err)
		dev_err(mmc_dev(host->mmc),
			"HS400 delay line calibration timed out\n");
}

static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
				       u8 thd_low, u8 fixed_tap)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	u32 val, tun_status;
	u8 word, bit, edge1, tap, window;
	bool tap_result;
	bool start_fail = false;
	bool start_pass = false;
	bool end_pass = false;
	bool first_fail = false;
	bool first_pass = false;
	u8 start_pass_tap = 0;
	u8 end_pass_tap = 0;
	u8 first_fail_tap = 0;
	u8 first_pass_tap = 0;
	u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;

	/*
	 * Read auto-tuned results and extract a good valid passing window by
	 * filtering out unwanted bubble/partial/merged windows.
	 */
	for (word = 0; word < total_tuning_words; word++) {
		val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
		val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
		val |= word;
		sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
		tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
		bit = 0;
		while (bit < TUNING_WORD_BIT_SIZE) {
			tap = word * TUNING_WORD_BIT_SIZE + bit;
			tap_result = tun_status & (1 << bit);
			if (!tap_result && !start_fail) {
				start_fail = true;
				if (!first_fail) {
					first_fail_tap = tap;
					first_fail = true;
				}

			} else if (tap_result && start_fail && !start_pass) {
				start_pass_tap = tap;
				start_pass = true;
				if (!first_pass) {
					first_pass_tap = tap;
					first_pass = true;
				}

			} else if (!tap_result && start_fail && start_pass &&
				   !end_pass) {
				end_pass_tap = tap - 1;
				end_pass = true;
			} else if (tap_result && start_pass && start_fail &&
				   end_pass) {
				window = end_pass_tap - start_pass_tap;
				/* discard merged window and bubble window */
				if (window >= thd_up || window < thd_low) {
					start_pass_tap = tap;
					end_pass = false;
				} else {
					/* set tap at middle of valid window */
					tap = start_pass_tap + window / 2;
					tegra_host->tuned_tap_delay = tap;
					return;
				}
			}

			bit++;
		}
	}

	if (!first_fail) {
		WARN(1, "no edge detected, continue with hw tuned delay.\n");
	} else if (first_pass) {
		/* set tap location at fixed tap relative to the first edge */
		edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
		if (edge1 - 1 > fixed_tap)
			tegra_host->tuned_tap_delay = edge1 - fixed_tap;
		else
			tegra_host->tuned_tap_delay = edge1 + fixed_tap;
	}
}

static void tegra_sdhci_post_tuning(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
	u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
	u8 fixed_tap, start_tap, end_tap, window_width;
	u8 thdupper, thdlower;
	u8 num_iter;
	u32 clk_rate_mhz, period_ps, bestcase, worstcase;

	/* retain the HW tuned tap to use in case no correction is needed */
	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
	tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
				      SDHCI_CLOCK_CTRL_TAP_SHIFT;
	if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
		min_tap_dly = soc_data->min_tap_delay;
		max_tap_dly = soc_data->max_tap_delay;
		clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
		period_ps = USEC_PER_SEC / clk_rate_mhz;
		bestcase = period_ps / min_tap_dly;
		worstcase = period_ps / max_tap_dly;
		/*
		 * Upper and lower bound thresholds used to detect merged and
		 * bubble windows
		 */
		thdupper = (2 * worstcase + bestcase) / 2;
		thdlower = worstcase / 4;
		/*
		 * fixed_tap is used when the HW tuning result contains a
		 * single edge; the tap is then set at a fixed delay relative
		 * to the first edge
		 */
		avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
		fixed_tap = avg_tap_dly / 2;

		val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
		start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
			  SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
		window_width = end_tap - start_tap;
		num_iter = host->tuning_loop_count;
		/*
		 * A partial window includes the edges of the tuning range.
		 * A merged window includes more taps, so its width is higher
		 * than the upper threshold.
		 */
		if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
		    (end_tap == num_iter - 2) || window_width >= thdupper) {
			pr_debug("%s: Apply tuning correction\n",
				 mmc_hostname(host->mmc));
			tegra_sdhci_tap_correction(host, thdupper, thdlower,
						   fixed_tap);
		}
	}

	tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
}

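/* Run standard SDHCI tuning and, on success, apply the tap correction above. */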
static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	err = sdhci_execute_tuning(mmc, opcode);
	if (!err && !host->tuning_err)
		tegra_sdhci_post_tuning(host);

	return err;
}

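/*
 * Select the UHS timing: program the hardware tuning iteration count, run
 * pad auto-calibration, and apply the tuned (or default) tap value plus the
 * DQS trim and DLL calibration required for HS400.
 */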
tegra_sdhci_set_uhs_signaling(struct sdhci_host * host,unsigned timing)992*4882a593Smuzhiyun static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
993*4882a593Smuzhiyun 					  unsigned timing)
994*4882a593Smuzhiyun {
995*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
996*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
997*4882a593Smuzhiyun 	bool set_default_tap = false;
998*4882a593Smuzhiyun 	bool set_dqs_trim = false;
999*4882a593Smuzhiyun 	bool do_hs400_dll_cal = false;
1000*4882a593Smuzhiyun 	u8 iter = TRIES_256;
1001*4882a593Smuzhiyun 	u32 val;
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	tegra_host->ddr_signaling = false;
1004*4882a593Smuzhiyun 	switch (timing) {
1005*4882a593Smuzhiyun 	case MMC_TIMING_UHS_SDR50:
1006*4882a593Smuzhiyun 		break;
1007*4882a593Smuzhiyun 	case MMC_TIMING_UHS_SDR104:
1008*4882a593Smuzhiyun 	case MMC_TIMING_MMC_HS200:
1009*4882a593Smuzhiyun 		/* Don't set default tap on tunable modes. */
1010*4882a593Smuzhiyun 		iter = TRIES_128;
1011*4882a593Smuzhiyun 		break;
1012*4882a593Smuzhiyun 	case MMC_TIMING_MMC_HS400:
1013*4882a593Smuzhiyun 		set_dqs_trim = true;
1014*4882a593Smuzhiyun 		do_hs400_dll_cal = true;
1015*4882a593Smuzhiyun 		iter = TRIES_128;
1016*4882a593Smuzhiyun 		break;
1017*4882a593Smuzhiyun 	case MMC_TIMING_MMC_DDR52:
1018*4882a593Smuzhiyun 	case MMC_TIMING_UHS_DDR50:
1019*4882a593Smuzhiyun 		tegra_host->ddr_signaling = true;
1020*4882a593Smuzhiyun 		set_default_tap = true;
1021*4882a593Smuzhiyun 		break;
1022*4882a593Smuzhiyun 	default:
1023*4882a593Smuzhiyun 		set_default_tap = true;
1024*4882a593Smuzhiyun 		break;
1025*4882a593Smuzhiyun 	}
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
1028*4882a593Smuzhiyun 	val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
1029*4882a593Smuzhiyun 		 SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
1030*4882a593Smuzhiyun 		 SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
1031*4882a593Smuzhiyun 	val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
1032*4882a593Smuzhiyun 		0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
1033*4882a593Smuzhiyun 		1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
1034*4882a593Smuzhiyun 	sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
1035*4882a593Smuzhiyun 	sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	sdhci_set_uhs_signaling(host, timing);
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 	tegra_sdhci_pad_autocalib(host);
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	if (tegra_host->tuned_tap_delay && !set_default_tap)
1044*4882a593Smuzhiyun 		tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
1045*4882a593Smuzhiyun 	else
1046*4882a593Smuzhiyun 		tegra_sdhci_set_tap(host, tegra_host->default_tap);
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	if (set_dqs_trim)
1049*4882a593Smuzhiyun 		tegra_sdhci_set_dqs_trim(host, tegra_host->dqs_trim);
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	if (do_hs400_dll_cal)
1052*4882a593Smuzhiyun 		tegra_sdhci_hs400_dll_cal(host);
1053*4882a593Smuzhiyun }
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun static int tegra_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
1056*4882a593Smuzhiyun {
1057*4882a593Smuzhiyun 	unsigned int min, max;
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	/*
1060*4882a593Smuzhiyun 	 * Start search for the minimum tap value at 10, as smaller values
1061*4882a593Smuzhiyun 	 * may wrongly be reported as working but fail at higher speeds,
1062*4882a593Smuzhiyun 	 * according to the TRM.
1063*4882a593Smuzhiyun 	 */
1064*4882a593Smuzhiyun 	min = 10;
1065*4882a593Smuzhiyun 	while (min < 255) {
1066*4882a593Smuzhiyun 		tegra_sdhci_set_tap(host, min);
1067*4882a593Smuzhiyun 		if (!mmc_send_tuning(host->mmc, opcode, NULL))
1068*4882a593Smuzhiyun 			break;
1069*4882a593Smuzhiyun 		min++;
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	/* Find the maximum tap value that still passes. */
1073*4882a593Smuzhiyun 	max = min + 1;
1074*4882a593Smuzhiyun 	while (max < 255) {
1075*4882a593Smuzhiyun 		tegra_sdhci_set_tap(host, max);
1076*4882a593Smuzhiyun 		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
1077*4882a593Smuzhiyun 			max--;
1078*4882a593Smuzhiyun 			break;
1079*4882a593Smuzhiyun 		}
1080*4882a593Smuzhiyun 		max++;
1081*4882a593Smuzhiyun 	}
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	/* The TRM states the ideal tap value is at 75% in the passing range. */
1084*4882a593Smuzhiyun 	tegra_sdhci_set_tap(host, min + ((max - min) * 3 / 4));
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	return mmc_send_tuning(host->mmc, opcode, NULL);
1087*4882a593Smuzhiyun }
1088*4882a593Smuzhiyun 
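The tap search above amounts to a simple window scan: walk up from tap 10 until tuning first passes, keep walking until it fails again, then settle at 75% of the passing window. A minimal standalone sketch of that logic, where send_tuning(), TAP_START and TAP_MAX are illustrative stand-ins rather than this driver's API:

	#include <stdbool.h>

	#define TAP_START	10	/* taps below 10 may pass spuriously, per the TRM */
	#define TAP_MAX		255

	/* send_tuning() returns true when tuning passes at the given tap. */
	static unsigned int pick_tap(bool (*send_tuning)(unsigned int tap))
	{
		unsigned int min = TAP_START, max;

		/* Find the first tap value that passes tuning. */
		while (min < TAP_MAX && !send_tuning(min))
			min++;

		/* Find the last passing tap; the first failure ends the window. */
		max = min + 1;
		while (max < TAP_MAX) {
			if (!send_tuning(max)) {
				max--;
				break;
			}
			max++;
		}

		/* Settle at 75% into the passing window, as the TRM recommends. */
		return min + (max - min) * 3 / 4;
	}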
1089*4882a593Smuzhiyun static int sdhci_tegra_start_signal_voltage_switch(struct mmc_host *mmc,
1090*4882a593Smuzhiyun 						   struct mmc_ios *ios)
1091*4882a593Smuzhiyun {
1092*4882a593Smuzhiyun 	struct sdhci_host *host = mmc_priv(mmc);
1093*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1094*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1095*4882a593Smuzhiyun 	int ret = 0;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1098*4882a593Smuzhiyun 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1099*4882a593Smuzhiyun 		if (ret < 0)
1100*4882a593Smuzhiyun 			return ret;
1101*4882a593Smuzhiyun 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1102*4882a593Smuzhiyun 	} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1103*4882a593Smuzhiyun 		ret = sdhci_start_signal_voltage_switch(mmc, ios);
1104*4882a593Smuzhiyun 		if (ret < 0)
1105*4882a593Smuzhiyun 			return ret;
1106*4882a593Smuzhiyun 		ret = tegra_sdhci_set_padctrl(host, ios->signal_voltage, true);
1107*4882a593Smuzhiyun 	}
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	if (tegra_host->pad_calib_required)
1110*4882a593Smuzhiyun 		tegra_sdhci_pad_autocalib(host);
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	return ret;
1113*4882a593Smuzhiyun }
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun static int tegra_sdhci_init_pinctrl_info(struct device *dev,
1116*4882a593Smuzhiyun 					 struct sdhci_tegra *tegra_host)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	tegra_host->pinctrl_sdmmc = devm_pinctrl_get(dev);
1119*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->pinctrl_sdmmc)) {
1120*4882a593Smuzhiyun 		dev_dbg(dev, "No pinctrl info, err: %ld\n",
1121*4882a593Smuzhiyun 			PTR_ERR(tegra_host->pinctrl_sdmmc));
1122*4882a593Smuzhiyun 		return -1;
1123*4882a593Smuzhiyun 	}
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	tegra_host->pinctrl_state_1v8_drv = pinctrl_lookup_state(
1126*4882a593Smuzhiyun 				tegra_host->pinctrl_sdmmc, "sdmmc-1v8-drv");
1127*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->pinctrl_state_1v8_drv)) {
1128*4882a593Smuzhiyun 		if (PTR_ERR(tegra_host->pinctrl_state_1v8_drv) == -ENODEV)
1129*4882a593Smuzhiyun 			tegra_host->pinctrl_state_1v8_drv = NULL;
1130*4882a593Smuzhiyun 	}
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	tegra_host->pinctrl_state_3v3_drv = pinctrl_lookup_state(
1133*4882a593Smuzhiyun 				tegra_host->pinctrl_sdmmc, "sdmmc-3v3-drv");
1134*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->pinctrl_state_3v3_drv)) {
1135*4882a593Smuzhiyun 		if (PTR_ERR(tegra_host->pinctrl_state_3v3_drv) == -ENODEV)
1136*4882a593Smuzhiyun 			tegra_host->pinctrl_state_3v3_drv = NULL;
1137*4882a593Smuzhiyun 	}
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	tegra_host->pinctrl_state_3v3 =
1140*4882a593Smuzhiyun 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-3v3");
1141*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->pinctrl_state_3v3)) {
1142*4882a593Smuzhiyun 		dev_warn(dev, "Missing 3.3V pad state, err: %ld\n",
1143*4882a593Smuzhiyun 			 PTR_ERR(tegra_host->pinctrl_state_3v3));
1144*4882a593Smuzhiyun 		return -1;
1145*4882a593Smuzhiyun 	}
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	tegra_host->pinctrl_state_1v8 =
1148*4882a593Smuzhiyun 		pinctrl_lookup_state(tegra_host->pinctrl_sdmmc, "sdmmc-1v8");
1149*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->pinctrl_state_1v8)) {
1150*4882a593Smuzhiyun 		dev_warn(dev, "Missing 1.8V pad state, err: %ld\n",
1151*4882a593Smuzhiyun 			 PTR_ERR(tegra_host->pinctrl_state_1v8));
1152*4882a593Smuzhiyun 		return -1;
1153*4882a593Smuzhiyun 	}
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	tegra_host->pad_control_available = true;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	return 0;
1158*4882a593Smuzhiyun }
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1163*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1164*4882a593Smuzhiyun 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1167*4882a593Smuzhiyun 		tegra_host->pad_calib_required = true;
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
1171*4882a593Smuzhiyun {
1172*4882a593Smuzhiyun 	struct mmc_host *mmc = cq_host->mmc;
1173*4882a593Smuzhiyun 	struct sdhci_host *host = mmc_priv(mmc);
1174*4882a593Smuzhiyun 	u8 ctrl;
1175*4882a593Smuzhiyun 	ktime_t timeout;
1176*4882a593Smuzhiyun 	bool timed_out;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	/*
1179*4882a593Smuzhiyun 	 * During CQE resume/unhalt, the CQHCI driver unhalts the CQE before
1180*4882a593Smuzhiyun 	 * the cqhci_host_ops ->enable() callback, where the SDHCI DMA and
1181*4882a593Smuzhiyun 	 * BLOCK_SIZE registers need to be reconfigured.
1182*4882a593Smuzhiyun 	 * Tegra CQHCI/SDHCI prevents write access to the block size register
1183*4882a593Smuzhiyun 	 * while the CQE is unhalted, so handle the CQE resume sequence here to
1184*4882a593Smuzhiyun 	 * configure the SDHCI block registers before exiting the halt state.
1185*4882a593Smuzhiyun 	 */
1186*4882a593Smuzhiyun 	if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
1187*4882a593Smuzhiyun 	    cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
1188*4882a593Smuzhiyun 		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1189*4882a593Smuzhiyun 		sdhci_cqe_enable(mmc);
1190*4882a593Smuzhiyun 		writel(val, cq_host->mmio + reg);
1191*4882a593Smuzhiyun 		timeout = ktime_add_us(ktime_get(), 50);
1192*4882a593Smuzhiyun 		while (1) {
1193*4882a593Smuzhiyun 			timed_out = ktime_compare(ktime_get(), timeout) > 0;
1194*4882a593Smuzhiyun 			ctrl = cqhci_readl(cq_host, CQHCI_CTL);
1195*4882a593Smuzhiyun 			if (!(ctrl & CQHCI_HALT) || timed_out)
1196*4882a593Smuzhiyun 				break;
1197*4882a593Smuzhiyun 		}
1198*4882a593Smuzhiyun 		/*
1199*4882a593Smuzhiyun 		 * CQE usually resumes very quickly, but in case the Tegra CQE
1200*4882a593Smuzhiyun 		 * doesn't resume, retry the unhalt.
1201*4882a593Smuzhiyun 		 */
1202*4882a593Smuzhiyun 		if (timed_out)
1203*4882a593Smuzhiyun 			writel(val, cq_host->mmio + reg);
1204*4882a593Smuzhiyun 	} else {
1205*4882a593Smuzhiyun 		writel(val, cq_host->mmio + reg);
1206*4882a593Smuzhiyun 	}
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun 
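The bounded wait in tegra_cqhci_writel() is a plain ktime poll: after writing the unhalt value, spin for up to 50 us until CQHCI_HALT clears, and rewrite the register once if it does not. A sketch of that wait factored into a hypothetical helper (tegra_cqhci_wait_unhalt() is not part of this driver; it only regroups the ktime and cqhci_readl() calls already used above):

	/* Returns true once CQHCI_HALT clears, false if still set after timeout_us. */
	static bool tegra_cqhci_wait_unhalt(struct cqhci_host *cq_host, u64 timeout_us)
	{
		ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);

		while (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
			if (ktime_compare(ktime_get(), timeout) > 0)
				return !(cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT);
		}

		return true;
	}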
1209*4882a593Smuzhiyun static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
1210*4882a593Smuzhiyun 					 struct mmc_request *mrq, u64 *data)
1211*4882a593Smuzhiyun {
1212*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
1213*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1214*4882a593Smuzhiyun 	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
1217*4882a593Smuzhiyun 	    mrq->cmd->flags & MMC_RSP_R1B)
1218*4882a593Smuzhiyun 		*data |= CQHCI_CMD_TIMING(1);
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
1222*4882a593Smuzhiyun {
1223*4882a593Smuzhiyun 	struct cqhci_host *cq_host = mmc->cqe_private;
1224*4882a593Smuzhiyun 	struct sdhci_host *host = mmc_priv(mmc);
1225*4882a593Smuzhiyun 	u32 val;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	/*
1228*4882a593Smuzhiyun 	 * The Tegra CQHCI/SDMMC design prevents write access to the SDHCI
1229*4882a593Smuzhiyun 	 * block size register while the CQE is enabled and unhalted.
1230*4882a593Smuzhiyun 	 * The CQHCI driver enables the CQE prior to activation, so disable the
1231*4882a593Smuzhiyun 	 * CQE before programming the block size, then re-enable it.
1232*4882a593Smuzhiyun 	 */
1233*4882a593Smuzhiyun 	if (!cq_host->activated) {
1234*4882a593Smuzhiyun 		val = cqhci_readl(cq_host, CQHCI_CFG);
1235*4882a593Smuzhiyun 		if (val & CQHCI_ENABLE)
1236*4882a593Smuzhiyun 			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
1237*4882a593Smuzhiyun 				     CQHCI_CFG);
1238*4882a593Smuzhiyun 		sdhci_writew(host, SDHCI_TEGRA_CQE_TRNS_MODE, SDHCI_TRANSFER_MODE);
1239*4882a593Smuzhiyun 		sdhci_cqe_enable(mmc);
1240*4882a593Smuzhiyun 		if (val & CQHCI_ENABLE)
1241*4882a593Smuzhiyun 			cqhci_writel(cq_host, val, CQHCI_CFG);
1242*4882a593Smuzhiyun 	}
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	/*
1245*4882a593Smuzhiyun 	 * CMD CRC errors are sometimes seen with some eMMC devices when a
1246*4882a593Smuzhiyun 	 * status command is sent during transfer of the last data block, which
1247*4882a593Smuzhiyun 	 * is the default case, as the send status command block counter (CBC)
1248*4882a593Smuzhiyun 	 * is 1. The recommended fix is to set CBC to 0, allowing the send
1249*4882a593Smuzhiyun 	 * status command only when the data lines are idle.
1250*4882a593Smuzhiyun 	 */
1251*4882a593Smuzhiyun 	val = cqhci_readl(cq_host, CQHCI_SSC1);
1252*4882a593Smuzhiyun 	val &= ~CQHCI_SSC1_CBC_MASK;
1253*4882a593Smuzhiyun 	cqhci_writel(cq_host, val, CQHCI_SSC1);
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun static void sdhci_tegra_dumpregs(struct mmc_host *mmc)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun 	sdhci_dumpregs(mmc_priv(mmc));
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	int cmd_error = 0;
1264*4882a593Smuzhiyun 	int data_error = 0;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
1267*4882a593Smuzhiyun 		return intmask;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	return 0;
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun static void tegra_sdhci_set_timeout(struct sdhci_host *host,
1275*4882a593Smuzhiyun 				    struct mmc_command *cmd)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun 	u32 val;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	/*
1280*4882a593Smuzhiyun 	 * The HW busy detection timeout is based on the programmed data timeout
1281*4882a593Smuzhiyun 	 * counter, and the maximum supported timeout is 11s, which may not be
1282*4882a593Smuzhiyun 	 * enough for long operations like cache flush, sleep awake and erase.
1283*4882a593Smuzhiyun 	 *
1284*4882a593Smuzhiyun 	 * The ERASE_TIMEOUT_LIMIT bit of the VENDOR_MISC_CTRL register allows
1285*4882a593Smuzhiyun 	 * the host controller to wait on the busy state for as long as the
1286*4882a593Smuzhiyun 	 * card stays busy, without a HW timeout.
1287*4882a593Smuzhiyun 	 *
1288*4882a593Smuzhiyun 	 * So, use infinite busy wait mode for operations that may take longer
1289*4882a593Smuzhiyun 	 * than the maximum HW busy timeout of 11s, otherwise use finite busy
1290*4882a593Smuzhiyun 	 * wait mode.
1291*4882a593Smuzhiyun 	 */
1292*4882a593Smuzhiyun 	val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1293*4882a593Smuzhiyun 	if (cmd && cmd->busy_timeout >= 11 * MSEC_PER_SEC)
1294*4882a593Smuzhiyun 		val |= SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1295*4882a593Smuzhiyun 	else
1296*4882a593Smuzhiyun 		val &= ~SDHCI_MISC_CTRL_ERASE_TIMEOUT_LIMIT;
1297*4882a593Smuzhiyun 	sdhci_writel(host, val, SDHCI_TEGRA_VENDOR_MISC_CTRL);
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	__sdhci_set_timeout(host, cmd);
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun 
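The 11s threshold above follows from the data timeout counter arithmetic: the largest programmable SDHCI data timeout corresponds to 2^27 timeout-clock cycles, and with the 12 MHz TMCLK used on these SoCs (see the TMCLK comment in sdhci_tegra_probe() below) that is

	2^27 / 12,000,000 Hz = 134,217,728 / 12,000,000 ≈ 11.2 s

so any command whose busy_timeout is 11s or more is routed to the ERASE_TIMEOUT_LIMIT (infinite busy wait) path instead of the finite HW timeout.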
1302*4882a593Smuzhiyun static void sdhci_tegra_cqe_pre_enable(struct mmc_host *mmc)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	struct cqhci_host *cq_host = mmc->cqe_private;
1305*4882a593Smuzhiyun 	u32 reg;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1308*4882a593Smuzhiyun 	reg |= CQHCI_ENABLE;
1309*4882a593Smuzhiyun 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1310*4882a593Smuzhiyun }
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun static void sdhci_tegra_cqe_post_disable(struct mmc_host *mmc)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun 	struct cqhci_host *cq_host = mmc->cqe_private;
1315*4882a593Smuzhiyun 	struct sdhci_host *host = mmc_priv(mmc);
1316*4882a593Smuzhiyun 	u32 reg;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	reg = cqhci_readl(cq_host, CQHCI_CFG);
1319*4882a593Smuzhiyun 	reg &= ~CQHCI_ENABLE;
1320*4882a593Smuzhiyun 	cqhci_writel(cq_host, reg, CQHCI_CFG);
1321*4882a593Smuzhiyun 	sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
1325*4882a593Smuzhiyun 	.write_l    = tegra_cqhci_writel,
1326*4882a593Smuzhiyun 	.enable	= sdhci_tegra_cqe_enable,
1327*4882a593Smuzhiyun 	.disable = sdhci_cqe_disable,
1328*4882a593Smuzhiyun 	.dumpregs = sdhci_tegra_dumpregs,
1329*4882a593Smuzhiyun 	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
1330*4882a593Smuzhiyun 	.pre_enable = sdhci_tegra_cqe_pre_enable,
1331*4882a593Smuzhiyun 	.post_disable = sdhci_tegra_cqe_post_disable,
1332*4882a593Smuzhiyun };
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun static int tegra_sdhci_set_dma_mask(struct sdhci_host *host)
1335*4882a593Smuzhiyun {
1336*4882a593Smuzhiyun 	struct sdhci_pltfm_host *platform = sdhci_priv(host);
1337*4882a593Smuzhiyun 	struct sdhci_tegra *tegra = sdhci_pltfm_priv(platform);
1338*4882a593Smuzhiyun 	const struct sdhci_tegra_soc_data *soc = tegra->soc_data;
1339*4882a593Smuzhiyun 	struct device *dev = mmc_dev(host->mmc);
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	if (soc->dma_mask)
1342*4882a593Smuzhiyun 		return dma_set_mask_and_coherent(dev, soc->dma_mask);
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	return 0;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun 
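For reference, the soc_data dma_mask values used in the tables below bound the DMA-addressable range: DMA_BIT_MASK(32) covers 4 GiB, 34 bits covers 16 GiB (Tegra124/Tegra210), 39 bits covers 512 GiB (Tegra194) and 40 bits covers 1 TiB (Tegra186), since an n-bit mask addresses 2^n bytes.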
1347*4882a593Smuzhiyun static const struct sdhci_ops tegra_sdhci_ops = {
1348*4882a593Smuzhiyun 	.get_ro     = tegra_sdhci_get_ro,
1349*4882a593Smuzhiyun 	.read_w     = tegra_sdhci_readw,
1350*4882a593Smuzhiyun 	.write_l    = tegra_sdhci_writel,
1351*4882a593Smuzhiyun 	.set_clock  = tegra_sdhci_set_clock,
1352*4882a593Smuzhiyun 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1353*4882a593Smuzhiyun 	.set_bus_width = sdhci_set_bus_width,
1354*4882a593Smuzhiyun 	.reset      = tegra_sdhci_reset,
1355*4882a593Smuzhiyun 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1356*4882a593Smuzhiyun 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1357*4882a593Smuzhiyun 	.voltage_switch = tegra_sdhci_voltage_switch,
1358*4882a593Smuzhiyun 	.get_max_clock = tegra_sdhci_get_max_clock,
1359*4882a593Smuzhiyun };
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
1362*4882a593Smuzhiyun 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1363*4882a593Smuzhiyun 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1364*4882a593Smuzhiyun 		  SDHCI_QUIRK_NO_HISPD_BIT |
1365*4882a593Smuzhiyun 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1366*4882a593Smuzhiyun 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1367*4882a593Smuzhiyun 	.ops  = &tegra_sdhci_ops,
1368*4882a593Smuzhiyun };
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra20 = {
1371*4882a593Smuzhiyun 	.pdata = &sdhci_tegra20_pdata,
1372*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(32),
1373*4882a593Smuzhiyun 	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
1374*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_BLOCK_GAP_DET,
1375*4882a593Smuzhiyun };
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun static const struct sdhci_pltfm_data sdhci_tegra30_pdata = {
1378*4882a593Smuzhiyun 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1379*4882a593Smuzhiyun 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1380*4882a593Smuzhiyun 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1381*4882a593Smuzhiyun 		  SDHCI_QUIRK_NO_HISPD_BIT |
1382*4882a593Smuzhiyun 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1383*4882a593Smuzhiyun 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1384*4882a593Smuzhiyun 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
1385*4882a593Smuzhiyun 		   SDHCI_QUIRK2_BROKEN_HS200 |
1386*4882a593Smuzhiyun 		   /*
1387*4882a593Smuzhiyun 		    * Auto-CMD23 leads to "Got command interrupt 0x00010000 even
1388*4882a593Smuzhiyun 		    * though no command operation was in progress."
1389*4882a593Smuzhiyun 		    *
1390*4882a593Smuzhiyun 		    * The exact reason is unknown, as the same hardware seems
1391*4882a593Smuzhiyun 		    * to support Auto CMD23 on a downstream 3.1 kernel.
1392*4882a593Smuzhiyun 		    */
1393*4882a593Smuzhiyun 		   SDHCI_QUIRK2_ACMD23_BROKEN,
1394*4882a593Smuzhiyun 	.ops  = &tegra_sdhci_ops,
1395*4882a593Smuzhiyun };
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra30 = {
1398*4882a593Smuzhiyun 	.pdata = &sdhci_tegra30_pdata,
1399*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(32),
1400*4882a593Smuzhiyun 	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 |
1401*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR50 |
1402*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR104 |
1403*4882a593Smuzhiyun 		    NVQUIRK_HAS_PADCALIB,
1404*4882a593Smuzhiyun };
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun static const struct sdhci_ops tegra114_sdhci_ops = {
1407*4882a593Smuzhiyun 	.get_ro     = tegra_sdhci_get_ro,
1408*4882a593Smuzhiyun 	.read_w     = tegra_sdhci_readw,
1409*4882a593Smuzhiyun 	.write_w    = tegra_sdhci_writew,
1410*4882a593Smuzhiyun 	.write_l    = tegra_sdhci_writel,
1411*4882a593Smuzhiyun 	.set_clock  = tegra_sdhci_set_clock,
1412*4882a593Smuzhiyun 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1413*4882a593Smuzhiyun 	.set_bus_width = sdhci_set_bus_width,
1414*4882a593Smuzhiyun 	.reset      = tegra_sdhci_reset,
1415*4882a593Smuzhiyun 	.platform_execute_tuning = tegra_sdhci_execute_tuning,
1416*4882a593Smuzhiyun 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1417*4882a593Smuzhiyun 	.voltage_switch = tegra_sdhci_voltage_switch,
1418*4882a593Smuzhiyun 	.get_max_clock = tegra_sdhci_get_max_clock,
1419*4882a593Smuzhiyun };
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun static const struct sdhci_pltfm_data sdhci_tegra114_pdata = {
1422*4882a593Smuzhiyun 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1423*4882a593Smuzhiyun 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1424*4882a593Smuzhiyun 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1425*4882a593Smuzhiyun 		  SDHCI_QUIRK_NO_HISPD_BIT |
1426*4882a593Smuzhiyun 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1427*4882a593Smuzhiyun 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1428*4882a593Smuzhiyun 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1429*4882a593Smuzhiyun 	.ops  = &tegra114_sdhci_ops,
1430*4882a593Smuzhiyun };
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra114 = {
1433*4882a593Smuzhiyun 	.pdata = &sdhci_tegra114_pdata,
1434*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(32),
1435*4882a593Smuzhiyun };
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun static const struct sdhci_pltfm_data sdhci_tegra124_pdata = {
1438*4882a593Smuzhiyun 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1439*4882a593Smuzhiyun 		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
1440*4882a593Smuzhiyun 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1441*4882a593Smuzhiyun 		  SDHCI_QUIRK_NO_HISPD_BIT |
1442*4882a593Smuzhiyun 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1443*4882a593Smuzhiyun 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1444*4882a593Smuzhiyun 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1445*4882a593Smuzhiyun 	.ops  = &tegra114_sdhci_ops,
1446*4882a593Smuzhiyun };
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra124 = {
1449*4882a593Smuzhiyun 	.pdata = &sdhci_tegra124_pdata,
1450*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(34),
1451*4882a593Smuzhiyun };
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun static const struct sdhci_ops tegra210_sdhci_ops = {
1454*4882a593Smuzhiyun 	.get_ro     = tegra_sdhci_get_ro,
1455*4882a593Smuzhiyun 	.read_w     = tegra_sdhci_readw,
1456*4882a593Smuzhiyun 	.write_w    = tegra210_sdhci_writew,
1457*4882a593Smuzhiyun 	.write_l    = tegra_sdhci_writel,
1458*4882a593Smuzhiyun 	.set_clock  = tegra_sdhci_set_clock,
1459*4882a593Smuzhiyun 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1460*4882a593Smuzhiyun 	.set_bus_width = sdhci_set_bus_width,
1461*4882a593Smuzhiyun 	.reset      = tegra_sdhci_reset,
1462*4882a593Smuzhiyun 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1463*4882a593Smuzhiyun 	.voltage_switch = tegra_sdhci_voltage_switch,
1464*4882a593Smuzhiyun 	.get_max_clock = tegra_sdhci_get_max_clock,
1465*4882a593Smuzhiyun 	.set_timeout = tegra_sdhci_set_timeout,
1466*4882a593Smuzhiyun };
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
1469*4882a593Smuzhiyun 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1470*4882a593Smuzhiyun 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1471*4882a593Smuzhiyun 		  SDHCI_QUIRK_NO_HISPD_BIT |
1472*4882a593Smuzhiyun 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1473*4882a593Smuzhiyun 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1474*4882a593Smuzhiyun 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1475*4882a593Smuzhiyun 	.ops  = &tegra210_sdhci_ops,
1476*4882a593Smuzhiyun };
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
1479*4882a593Smuzhiyun 	.pdata = &sdhci_tegra210_pdata,
1480*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(34),
1481*4882a593Smuzhiyun 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1482*4882a593Smuzhiyun 		    NVQUIRK_HAS_PADCALIB |
1483*4882a593Smuzhiyun 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1484*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR50 |
1485*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR104 |
1486*4882a593Smuzhiyun 		    NVQUIRK_HAS_TMCLK,
1487*4882a593Smuzhiyun 	.min_tap_delay = 106,
1488*4882a593Smuzhiyun 	.max_tap_delay = 185,
1489*4882a593Smuzhiyun };
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun static const struct sdhci_ops tegra186_sdhci_ops = {
1492*4882a593Smuzhiyun 	.get_ro     = tegra_sdhci_get_ro,
1493*4882a593Smuzhiyun 	.read_w     = tegra_sdhci_readw,
1494*4882a593Smuzhiyun 	.write_l    = tegra_sdhci_writel,
1495*4882a593Smuzhiyun 	.set_clock  = tegra_sdhci_set_clock,
1496*4882a593Smuzhiyun 	.set_dma_mask = tegra_sdhci_set_dma_mask,
1497*4882a593Smuzhiyun 	.set_bus_width = sdhci_set_bus_width,
1498*4882a593Smuzhiyun 	.reset      = tegra_sdhci_reset,
1499*4882a593Smuzhiyun 	.set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
1500*4882a593Smuzhiyun 	.voltage_switch = tegra_sdhci_voltage_switch,
1501*4882a593Smuzhiyun 	.get_max_clock = tegra_sdhci_get_max_clock,
1502*4882a593Smuzhiyun 	.irq = sdhci_tegra_cqhci_irq,
1503*4882a593Smuzhiyun 	.set_timeout = tegra_sdhci_set_timeout,
1504*4882a593Smuzhiyun };
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
1507*4882a593Smuzhiyun 	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
1508*4882a593Smuzhiyun 		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
1509*4882a593Smuzhiyun 		  SDHCI_QUIRK_NO_HISPD_BIT |
1510*4882a593Smuzhiyun 		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
1511*4882a593Smuzhiyun 		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
1512*4882a593Smuzhiyun 	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
1513*4882a593Smuzhiyun 	.ops  = &tegra186_sdhci_ops,
1514*4882a593Smuzhiyun };
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
1517*4882a593Smuzhiyun 	.pdata = &sdhci_tegra186_pdata,
1518*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(40),
1519*4882a593Smuzhiyun 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1520*4882a593Smuzhiyun 		    NVQUIRK_HAS_PADCALIB |
1521*4882a593Smuzhiyun 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1522*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR50 |
1523*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR104 |
1524*4882a593Smuzhiyun 		    NVQUIRK_HAS_TMCLK |
1525*4882a593Smuzhiyun 		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
1526*4882a593Smuzhiyun 	.min_tap_delay = 84,
1527*4882a593Smuzhiyun 	.max_tap_delay = 136,
1528*4882a593Smuzhiyun };
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
1531*4882a593Smuzhiyun 	.pdata = &sdhci_tegra186_pdata,
1532*4882a593Smuzhiyun 	.dma_mask = DMA_BIT_MASK(39),
1533*4882a593Smuzhiyun 	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
1534*4882a593Smuzhiyun 		    NVQUIRK_HAS_PADCALIB |
1535*4882a593Smuzhiyun 		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
1536*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR50 |
1537*4882a593Smuzhiyun 		    NVQUIRK_ENABLE_SDR104 |
1538*4882a593Smuzhiyun 		    NVQUIRK_HAS_TMCLK,
1539*4882a593Smuzhiyun 	.min_tap_delay = 96,
1540*4882a593Smuzhiyun 	.max_tap_delay = 139,
1541*4882a593Smuzhiyun };
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun static const struct of_device_id sdhci_tegra_dt_match[] = {
1544*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
1545*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
1546*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
1547*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },
1548*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 },
1549*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 },
1550*4882a593Smuzhiyun 	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
1551*4882a593Smuzhiyun 	{}
1552*4882a593Smuzhiyun };
1553*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match);
1554*4882a593Smuzhiyun 
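sdhci_tegra_probe() below resolves this table with of_match_device() and then dereferences match->data. A slightly shorter equivalent (shown only as a sketch, not what this file does) is of_device_get_match_data(), which returns the matching entry's .data pointer directly:

	const struct sdhci_tegra_soc_data *soc_data;

	soc_data = of_device_get_match_data(&pdev->dev);
	if (!soc_data)
		return -EINVAL;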
1555*4882a593Smuzhiyun static int sdhci_tegra_add_host(struct sdhci_host *host)
1556*4882a593Smuzhiyun {
1557*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1558*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1559*4882a593Smuzhiyun 	struct cqhci_host *cq_host;
1560*4882a593Smuzhiyun 	bool dma64;
1561*4882a593Smuzhiyun 	int ret;
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	if (!tegra_host->enable_hwcq)
1564*4882a593Smuzhiyun 		return sdhci_add_host(host);
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	sdhci_enable_v4_mode(host);
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	ret = sdhci_setup_host(host);
1569*4882a593Smuzhiyun 	if (ret)
1570*4882a593Smuzhiyun 		return ret;
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	cq_host = devm_kzalloc(host->mmc->parent,
1575*4882a593Smuzhiyun 				sizeof(*cq_host), GFP_KERNEL);
1576*4882a593Smuzhiyun 	if (!cq_host) {
1577*4882a593Smuzhiyun 		ret = -ENOMEM;
1578*4882a593Smuzhiyun 		goto cleanup;
1579*4882a593Smuzhiyun 	}
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	cq_host->mmio = host->ioaddr + SDHCI_TEGRA_CQE_BASE_ADDR;
1582*4882a593Smuzhiyun 	cq_host->ops = &sdhci_tegra_cqhci_ops;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
1585*4882a593Smuzhiyun 	if (dma64)
1586*4882a593Smuzhiyun 		cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	ret = cqhci_init(cq_host, host->mmc, dma64);
1589*4882a593Smuzhiyun 	if (ret)
1590*4882a593Smuzhiyun 		goto cleanup;
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	ret = __sdhci_add_host(host);
1593*4882a593Smuzhiyun 	if (ret)
1594*4882a593Smuzhiyun 		goto cleanup;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	return 0;
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun cleanup:
1599*4882a593Smuzhiyun 	sdhci_cleanup_host(host);
1600*4882a593Smuzhiyun 	return ret;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun static int sdhci_tegra_probe(struct platform_device *pdev)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	const struct of_device_id *match;
1606*4882a593Smuzhiyun 	const struct sdhci_tegra_soc_data *soc_data;
1607*4882a593Smuzhiyun 	struct sdhci_host *host;
1608*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host;
1609*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host;
1610*4882a593Smuzhiyun 	struct clk *clk;
1611*4882a593Smuzhiyun 	int rc;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
1614*4882a593Smuzhiyun 	if (!match)
1615*4882a593Smuzhiyun 		return -EINVAL;
1616*4882a593Smuzhiyun 	soc_data = match->data;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*tegra_host));
1619*4882a593Smuzhiyun 	if (IS_ERR(host))
1620*4882a593Smuzhiyun 		return PTR_ERR(host);
1621*4882a593Smuzhiyun 	pltfm_host = sdhci_priv(host);
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	tegra_host = sdhci_pltfm_priv(pltfm_host);
1624*4882a593Smuzhiyun 	tegra_host->ddr_signaling = false;
1625*4882a593Smuzhiyun 	tegra_host->pad_calib_required = false;
1626*4882a593Smuzhiyun 	tegra_host->pad_control_available = false;
1627*4882a593Smuzhiyun 	tegra_host->soc_data = soc_data;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 	if (soc_data->nvquirks & NVQUIRK_NEEDS_PAD_CONTROL) {
1630*4882a593Smuzhiyun 		rc = tegra_sdhci_init_pinctrl_info(&pdev->dev, tegra_host);
1631*4882a593Smuzhiyun 		if (rc == 0)
1632*4882a593Smuzhiyun 			host->mmc_host_ops.start_signal_voltage_switch =
1633*4882a593Smuzhiyun 				sdhci_tegra_start_signal_voltage_switch;
1634*4882a593Smuzhiyun 	}
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	/* Hook to periodically rerun pad calibration */
1637*4882a593Smuzhiyun 	if (soc_data->nvquirks & NVQUIRK_HAS_PADCALIB)
1638*4882a593Smuzhiyun 		host->mmc_host_ops.request = tegra_sdhci_request;
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	host->mmc_host_ops.hs400_enhanced_strobe =
1641*4882a593Smuzhiyun 			tegra_sdhci_hs400_enhanced_strobe;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	if (!host->ops->platform_execute_tuning)
1644*4882a593Smuzhiyun 		host->mmc_host_ops.execute_tuning =
1645*4882a593Smuzhiyun 				tegra_sdhci_execute_hw_tuning;
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	rc = mmc_of_parse(host->mmc);
1648*4882a593Smuzhiyun 	if (rc)
1649*4882a593Smuzhiyun 		goto err_parse_dt;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1652*4882a593Smuzhiyun 		host->mmc->caps |= MMC_CAP_1_8V_DDR;
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 	/* HW busy detection is supported, but R1B responses are required. */
1655*4882a593Smuzhiyun 	host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	tegra_sdhci_parse_dt(host);
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1660*4882a593Smuzhiyun 							 GPIOD_OUT_HIGH);
1661*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->power_gpio)) {
1662*4882a593Smuzhiyun 		rc = PTR_ERR(tegra_host->power_gpio);
1663*4882a593Smuzhiyun 		goto err_power_req;
1664*4882a593Smuzhiyun 	}
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 	/*
1667*4882a593Smuzhiyun 	 * Tegra210 has a separate SDMMC_LEGACY_TM clock used as the host
1668*4882a593Smuzhiyun 	 * timeout clock, and SW can choose TMCLK or SDCLK for the hardware
1669*4882a593Smuzhiyun 	 * data timeout through the USE_TMCLK_FOR_DATA_TIMEOUT bit of the
1670*4882a593Smuzhiyun 	 * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
1671*4882a593Smuzhiyun 	 *
1672*4882a593Smuzhiyun 	 * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, and SDMMC uses the
1673*4882a593Smuzhiyun 	 * 12 MHz TMCLK, which is advertised in the host capability register.
1674*4882a593Smuzhiyun 	 * A 12 MHz TMCLK provides a maximum data timeout period of 11s, which
1675*4882a593Smuzhiyun 	 * is better than using SDCLK for the data timeout.
1676*4882a593Smuzhiyun 	 *
1677*4882a593Smuzhiyun 	 * So, TMCLK is set to 12 MHz and kept enabled all the time on SoCs
1678*4882a593Smuzhiyun 	 * supporting a separate TMCLK.
1679*4882a593Smuzhiyun 	 */
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
1682*4882a593Smuzhiyun 		clk = devm_clk_get(&pdev->dev, "tmclk");
1683*4882a593Smuzhiyun 		if (IS_ERR(clk)) {
1684*4882a593Smuzhiyun 			rc = PTR_ERR(clk);
1685*4882a593Smuzhiyun 			if (rc == -EPROBE_DEFER)
1686*4882a593Smuzhiyun 				goto err_power_req;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 			dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
1689*4882a593Smuzhiyun 			clk = NULL;
1690*4882a593Smuzhiyun 		}
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 		clk_set_rate(clk, 12000000);
1693*4882a593Smuzhiyun 		rc = clk_prepare_enable(clk);
1694*4882a593Smuzhiyun 		if (rc) {
1695*4882a593Smuzhiyun 			dev_err(&pdev->dev,
1696*4882a593Smuzhiyun 				"failed to enable tmclk: %d\n", rc);
1697*4882a593Smuzhiyun 			goto err_power_req;
1698*4882a593Smuzhiyun 		}
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 		tegra_host->tmclk = clk;
1701*4882a593Smuzhiyun 	}
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	clk = devm_clk_get(mmc_dev(host->mmc), NULL);
1704*4882a593Smuzhiyun 	if (IS_ERR(clk)) {
1705*4882a593Smuzhiyun 		rc = dev_err_probe(&pdev->dev, PTR_ERR(clk),
1706*4882a593Smuzhiyun 				   "failed to get clock\n");
1707*4882a593Smuzhiyun 		goto err_clk_get;
1708*4882a593Smuzhiyun 	}
1709*4882a593Smuzhiyun 	clk_prepare_enable(clk);
1710*4882a593Smuzhiyun 	pltfm_host->clk = clk;
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	tegra_host->rst = devm_reset_control_get_exclusive(&pdev->dev,
1713*4882a593Smuzhiyun 							   "sdhci");
1714*4882a593Smuzhiyun 	if (IS_ERR(tegra_host->rst)) {
1715*4882a593Smuzhiyun 		rc = PTR_ERR(tegra_host->rst);
1716*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to get reset control: %d\n", rc);
1717*4882a593Smuzhiyun 		goto err_rst_get;
1718*4882a593Smuzhiyun 	}
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun 	rc = reset_control_assert(tegra_host->rst);
1721*4882a593Smuzhiyun 	if (rc)
1722*4882a593Smuzhiyun 		goto err_rst_get;
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	usleep_range(2000, 4000);
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun 	rc = reset_control_deassert(tegra_host->rst);
1727*4882a593Smuzhiyun 	if (rc)
1728*4882a593Smuzhiyun 		goto err_rst_get;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	usleep_range(2000, 4000);
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	rc = sdhci_tegra_add_host(host);
1733*4882a593Smuzhiyun 	if (rc)
1734*4882a593Smuzhiyun 		goto err_add_host;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	return 0;
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun err_add_host:
1739*4882a593Smuzhiyun 	reset_control_assert(tegra_host->rst);
1740*4882a593Smuzhiyun err_rst_get:
1741*4882a593Smuzhiyun 	clk_disable_unprepare(pltfm_host->clk);
1742*4882a593Smuzhiyun err_clk_get:
1743*4882a593Smuzhiyun 	clk_disable_unprepare(tegra_host->tmclk);
1744*4882a593Smuzhiyun err_power_req:
1745*4882a593Smuzhiyun err_parse_dt:
1746*4882a593Smuzhiyun 	sdhci_pltfm_free(pdev);
1747*4882a593Smuzhiyun 	return rc;
1748*4882a593Smuzhiyun }
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun static int sdhci_tegra_remove(struct platform_device *pdev)
1751*4882a593Smuzhiyun {
1752*4882a593Smuzhiyun 	struct sdhci_host *host = platform_get_drvdata(pdev);
1753*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1754*4882a593Smuzhiyun 	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	sdhci_remove_host(host, 0);
1757*4882a593Smuzhiyun 
1758*4882a593Smuzhiyun 	reset_control_assert(tegra_host->rst);
1759*4882a593Smuzhiyun 	usleep_range(2000, 4000);
1760*4882a593Smuzhiyun 	clk_disable_unprepare(pltfm_host->clk);
1761*4882a593Smuzhiyun 	clk_disable_unprepare(tegra_host->tmclk);
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	sdhci_pltfm_free(pdev);
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun 	return 0;
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1769*4882a593Smuzhiyun static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
1770*4882a593Smuzhiyun {
1771*4882a593Smuzhiyun 	struct sdhci_host *host = dev_get_drvdata(dev);
1772*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1773*4882a593Smuzhiyun 	int ret;
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1776*4882a593Smuzhiyun 		ret = cqhci_suspend(host->mmc);
1777*4882a593Smuzhiyun 		if (ret)
1778*4882a593Smuzhiyun 			return ret;
1779*4882a593Smuzhiyun 	}
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	ret = sdhci_suspend_host(host);
1782*4882a593Smuzhiyun 	if (ret) {
1783*4882a593Smuzhiyun 		cqhci_resume(host->mmc);
1784*4882a593Smuzhiyun 		return ret;
1785*4882a593Smuzhiyun 	}
1786*4882a593Smuzhiyun 
1787*4882a593Smuzhiyun 	clk_disable_unprepare(pltfm_host->clk);
1788*4882a593Smuzhiyun 	return 0;
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun static int __maybe_unused sdhci_tegra_resume(struct device *dev)
1792*4882a593Smuzhiyun {
1793*4882a593Smuzhiyun 	struct sdhci_host *host = dev_get_drvdata(dev);
1794*4882a593Smuzhiyun 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1795*4882a593Smuzhiyun 	int ret;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	ret = clk_prepare_enable(pltfm_host->clk);
1798*4882a593Smuzhiyun 	if (ret)
1799*4882a593Smuzhiyun 		return ret;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	ret = sdhci_resume_host(host);
1802*4882a593Smuzhiyun 	if (ret)
1803*4882a593Smuzhiyun 		goto disable_clk;
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun 	if (host->mmc->caps2 & MMC_CAP2_CQE) {
1806*4882a593Smuzhiyun 		ret = cqhci_resume(host->mmc);
1807*4882a593Smuzhiyun 		if (ret)
1808*4882a593Smuzhiyun 			goto suspend_host;
1809*4882a593Smuzhiyun 	}
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	return 0;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun suspend_host:
1814*4882a593Smuzhiyun 	sdhci_suspend_host(host);
1815*4882a593Smuzhiyun disable_clk:
1816*4882a593Smuzhiyun 	clk_disable_unprepare(pltfm_host->clk);
1817*4882a593Smuzhiyun 	return ret;
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun #endif
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
1822*4882a593Smuzhiyun 			 sdhci_tegra_resume);
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun static struct platform_driver sdhci_tegra_driver = {
1825*4882a593Smuzhiyun 	.driver		= {
1826*4882a593Smuzhiyun 		.name	= "sdhci-tegra",
1827*4882a593Smuzhiyun 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
1828*4882a593Smuzhiyun 		.of_match_table = sdhci_tegra_dt_match,
1829*4882a593Smuzhiyun 		.pm	= &sdhci_tegra_dev_pm_ops,
1830*4882a593Smuzhiyun 	},
1831*4882a593Smuzhiyun 	.probe		= sdhci_tegra_probe,
1832*4882a593Smuzhiyun 	.remove		= sdhci_tegra_remove,
1833*4882a593Smuzhiyun };
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun module_platform_driver(sdhci_tegra_driver);
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun MODULE_DESCRIPTION("SDHCI driver for Tegra");
1838*4882a593Smuzhiyun MODULE_AUTHOR("Google, Inc.");
1839*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1840