xref: /OK3568_Linux_fs/u-boot/arch/arm/mach-imx/mx6/ddr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (C) 2014 Gateworks Corporation
 * Author: Tim Harvey <tharvey@gateworks.com>
 *
 * SPDX-License-Identifier:     GPL-2.0+
 */

#include <common.h>
#include <linux/types.h>
#include <asm/arch/clock.h>
#include <asm/arch/mx6-ddr.h>
#include <asm/arch/sys_proto.h>
#include <asm/io.h>
#include <asm/types.h>
#include <wait_bit.h>

#if defined(CONFIG_MX6_DDRCAL)
static void reset_read_data_fifos(void)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	/* Reset data FIFOs twice. */
	setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);

	setbits_le32(&mmdc0->mpdgctrl0, 1 << 31);
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 31, 0, 100, 0);
}

static void precharge_all(const bool cs0_enable, const bool cs1_enable)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	/*
	 * Issue the Precharge-All command to the DDR device for both
	 * chip selects. Note, CON_REQ bit should also remain set. If
	 * only using one chip select, then precharge only the desired
	 * chip select.
	 */
	if (cs0_enable) { /* CS0 */
		writel(0x04008050, &mmdc0->mdscr);
		wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
	}

	if (cs1_enable) { /* CS1 */
		writel(0x04008058, &mmdc0->mdscr);
		wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);
	}
}

static void force_delay_measurement(int bus_size)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;

	writel(0x800, &mmdc0->mpmur0);
	if (bus_size == 0x2)
		writel(0x800, &mmdc1->mpmur0);
}

static void modify_dg_result(u32 *reg_st0, u32 *reg_st1, u32 *reg_ctrl)
{
	u32 dg_tmp_val, dg_dl_abs_offset, dg_hc_del, val_ctrl;

	/*
	 * DQS gating absolute offset should be modified from reflecting
	 * (HW_DG_LOWx + HW_DG_UPx)/2 to reflecting (HW_DG_UPx - 0x80)
	 */
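	/*
	 * Worked example with a hypothetical status value: if bits [26:16]
	 * of reg_st0 read back as 0x1a0, then dg_tmp_val = 0x1a0 - 0xc0 =
	 * 0xe0, the absolute delay-line offset becomes 0xe0 & 0x7f = 0x60
	 * (bits [6:0]), and the half-cycle count (0xe0 & 0x780) << 1 = 0x100
	 * lands in bits [11:8], so 0x160 is merged into the control field.
	 */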

	val_ctrl = readl(reg_ctrl);
	val_ctrl &= 0xf0000000;

	dg_tmp_val = ((readl(reg_st0) & 0x07ff0000) >> 16) - 0xc0;
	dg_dl_abs_offset = dg_tmp_val & 0x7f;
	dg_hc_del = (dg_tmp_val & 0x780) << 1;

	val_ctrl |= dg_dl_abs_offset + dg_hc_del;

	dg_tmp_val = ((readl(reg_st1) & 0x07ff0000) >> 16) - 0xc0;
	dg_dl_abs_offset = dg_tmp_val & 0x7f;
	dg_hc_del = (dg_tmp_val & 0x780) << 1;

	val_ctrl |= (dg_dl_abs_offset + dg_hc_del) << 16;

	writel(val_ctrl, reg_ctrl);
}

int mmdc_do_write_level_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	u32 esdmisc_val, zq_val;
	u32 errors = 0;
	u32 ldectrl[4] = {0};
	u32 ddr_mr1 = 0x4;
	u32 rwalat_max;

	/*
	 * Stash the old values in case calibration fails and
	 * we need to restore them.
	 */
	ldectrl[0] = readl(&mmdc0->mpwldectrl0);
	ldectrl[1] = readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		ldectrl[2] = readl(&mmdc1->mpwldectrl0);
		ldectrl[3] = readl(&mmdc1->mpwldectrl1);
	}

	/* disable DDR logic power down timer */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer */
	setbits_le32(&mmdc0->mapsr, 0x1);

	debug("Starting write leveling calibration.\n");

	/*
	 * 2. disable auto refresh and ZQ calibration
	 * before proceeding with Write Leveling calibration
	 */
	esdmisc_val = readl(&mmdc0->mdref);
	writel(0x0000C000, &mmdc0->mdref);
	zq_val = readl(&mmdc0->mpzqhwctrl);
	writel(zq_val & ~0x3, &mmdc0->mpzqhwctrl);

	/* 3. increase walat and ralat to maximum */
	rwalat_max = (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17);
	setbits_le32(&mmdc0->mdmisc, rwalat_max);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mdmisc, rwalat_max);
	/*
	 * 4 & 5. Configure the external DDR device to enter write-leveling
	 * mode through Load Mode Register command.
	 * Register setting:
	 * Bits[31:16] MR1 value (0x0080 write leveling enable)
	 * Bit[9] set WL_EN to enable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
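	/*
	 * For reference, the value written below decodes as: MR1 = 0x0080
	 * in bits [31:16], CON_REQ (bit 15), WL_EN (bit 9), CMD = 0x3
	 * (load mode register) in bits [6:4] and CMD_BA = 0x1 (MR1) in
	 * bits [2:0], i.e. 0x8000 | 0x200 | 0x30 | 0x1 = 0x8231 in the
	 * lower half-word.
	 */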
	writel(0x00808231, &mmdc0->mdscr);

	/* 6. Activate automatic calibration by setting MPWLGCR[HW_WL_EN] */
	writel(0x00000001, &mmdc0->mpwlgcr);

	/*
	 * 7. Upon completion of this process the MMDC de-asserts
	 * the MPWLGCR[HW_WL_EN]
	 */
	wait_for_bit_le32(&mmdc0->mpwlgcr, 1 << 0, 0, 100, 0);

	/*
	 * 8. check for any errors: check both PHYs for x64 configuration,
	 * if x32, check only PHY0
	 */
	if (readl(&mmdc0->mpwlgcr) & 0x00000F00)
		errors |= 1;
	if (sysinfo->dsize == 2)
		if (readl(&mmdc1->mpwlgcr) & 0x00000F00)
			errors |= 2;

	debug("Ending write leveling calibration. Error mask: 0x%x\n", errors);

	/* check to see if cal failed */
	if ((readl(&mmdc0->mpwldectrl0) == 0x001F001F) &&
	    (readl(&mmdc0->mpwldectrl1) == 0x001F001F) &&
	    ((sysinfo->dsize < 2) ||
	     ((readl(&mmdc1->mpwldectrl0) == 0x001F001F) &&
	      (readl(&mmdc1->mpwldectrl1) == 0x001F001F)))) {
		debug("Cal seems to have soft-failed due to memory not supporting write leveling on all channels. Restoring original write leveling values.\n");
		writel(ldectrl[0], &mmdc0->mpwldectrl0);
		writel(ldectrl[1], &mmdc0->mpwldectrl1);
		if (sysinfo->dsize == 2) {
			writel(ldectrl[2], &mmdc1->mpwldectrl0);
			writel(ldectrl[3], &mmdc1->mpwldectrl1);
		}
		errors |= 4;
	}

	/*
	 * User should issue MRS command to exit write leveling mode
	 * through Load Mode Register command
	 * Register setting:
	 * Bits[31:16] MR1 value ("ddr_mr1" from initialization)
	 * Bit[9] clear WL_EN to disable MMDC DQS output
	 * Bits[6:4] set CMD bits for Load Mode Register programming
	 * Bits[2:0] set CMD_BA to 0x1 for DDR MR1 programming
	 */
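	/*
	 * With ddr_mr1 = 0x4 the value written below is 0x00048031:
	 * CON_REQ | CMD = 0x3 | CMD_BA = 0x1, with bit 9 (WL_EN) now
	 * cleared so the MMDC stops driving DQS.
	 */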
	writel((ddr_mr1 << 16) + 0x8031, &mmdc0->mdscr);

	/* re-enable auto refresh and zq cal */
	writel(esdmisc_val, &mmdc0->mdref);
	writel(zq_val, &mmdc0->mpzqhwctrl);

	debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n",
	      readl(&mmdc0->mpwldectrl0));
	debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n",
	      readl(&mmdc0->mpwldectrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMMDC_MPWLDECTRL0 after write level cal: 0x%08X\n",
		      readl(&mmdc1->mpwldectrl0));
		debug("\tMMDC_MPWLDECTRL1 after write level cal: 0x%08X\n",
		      readl(&mmdc1->mpwldectrl1));
	}

	/* We must force a readback of these values, to get them to stick */
	readl(&mmdc0->mpwldectrl0);
	readl(&mmdc0->mpwldectrl1);
	if (sysinfo->dsize == 2) {
		readl(&mmdc1->mpwldectrl0);
		readl(&mmdc1->mpwldectrl1);
	}

	/* enable DDR logic power down timer: */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer: */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Clear CON_REQ */
	writel(0, &mmdc0->mdscr);

	return errors;
}

int mmdc_do_dqs_calibration(struct mx6_ddr_sysinfo const *sysinfo)
{
	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
	struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux =
		(struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
	bool cs0_enable;
	bool cs1_enable;
	bool cs0_enable_initial;
	bool cs1_enable_initial;
	u32 esdmisc_val;
	u32 temp_ref;
	u32 pddword = 0x00ffff00; /* best so far, place into MPPDCMPR1 */
	u32 errors = 0;
	u32 initdelay = 0x40404040;

	/* check to see which chip selects are enabled */
	cs0_enable_initial = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable_initial = readl(&mmdc0->mdctl) & 0x40000000;

	/* disable DDR logic power down timer: */
	clrbits_le32(&mmdc0->mdpdc, 0xff00);

	/* disable Adopt power down timer: */
	setbits_le32(&mmdc0->mapsr, 0x1);

	/* set DQS pull ups */
	setbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000);
	setbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000);

	/* Save old RALAT and WALAT values */
	esdmisc_val = readl(&mmdc0->mdmisc);

	setbits_le32(&mmdc0->mdmisc,
		     (1 << 6) | (1 << 7) | (1 << 8) | (1 << 16) | (1 << 17));

	/* Disable auto refresh before proceeding with calibration */
	temp_ref = readl(&mmdc0->mdref);
	writel(0x0000c000, &mmdc0->mdref);

	/*
	 * Per the ref manual, issue one refresh cycle (MDSCR[CMD] = 0x2);
	 * this also sets the CON_REQ bit.
	 */
	if (cs0_enable_initial)
		writel(0x00008020, &mmdc0->mdscr);
	if (cs1_enable_initial)
		writel(0x00008028, &mmdc0->mdscr);

	/* poll to make sure the con_ack bit was asserted */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 1, 100, 0);

	/*
	 * Check MDMISC register CALIB_PER_CS to see which CS calibration
	 * is targeted to (under normal cases, it should be cleared
	 * as this is the default value, indicating calibration is directed
	 * to CS0).
	 * Disable the other chip select not being targeted for calibration
	 * to avoid any potential issues.  This will get re-enabled at end
	 * of calibration.
	 */
	if ((readl(&mmdc0->mdmisc) & 0x00100000) == 0)
		clrbits_le32(&mmdc0->mdctl, 1 << 30);	/* clear SDE_1 */
	else
		clrbits_le32(&mmdc0->mdctl, 1 << 31);	/* clear SDE_0 */

	/*
	 * Check to see which chip selects are now enabled for
	 * the remainder of the calibration.
	 */
	cs0_enable = readl(&mmdc0->mdctl) & 0x80000000;
	cs1_enable = readl(&mmdc0->mdctl) & 0x40000000;

	precharge_all(cs0_enable, cs1_enable);

	/* Write the pre-defined value into MPPDCMPR1 */
	writel(pddword, &mmdc0->mppdcmpr1);

	/*
	 * Issue a write access to the external DDR device by setting
	 * the bit SW_DUMMY_WR (bit 0) in the MPSWDAR0 and then poll
	 * this bit until it clears to indicate completion of the write access.
	 */
	setbits_le32(&mmdc0->mpswdar0, 1);
	wait_for_bit_le32(&mmdc0->mpswdar0, 1 << 0, 0, 100, 0);

	/* Set the RD_DL_ABS# bits to their default values
	 * (will be calibrated later in the read delay-line calibration).
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mprddlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mprddlctl);

	/* Force a measurement for the previous delay setup to take effect. */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * ***************************
	 * Read DQS Gating calibration
	 * ***************************
	 */
	debug("Starting Read DQS Gating calibration.\n");

	/*
	 * Reset the read data FIFOs (two resets); only need to issue reset
	 * to PHY0 since in x64 mode, the reset will also go to PHY1.
	 */
	reset_read_data_fifos();

	/*
	 * Start the automatic read DQS gating calibration process by
	 * asserting MPDGCTRL0[HW_DG_EN] and MPDGCTRL0[DG_CMP_CYC]
	 * and then poll MPDGCTRL0[HW_DG_EN] until this bit clears
	 * to indicate completion.
	 * Also, ensure that MPDGCTRL0[HW_DG_ERR] is clear to indicate
	 * no errors were seen during calibration.
	 */

	/*
	 * Set bit 30: chooses option to wait 32 cycles instead of
	 * 16 before comparing read data.
	 */
	setbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		setbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/* Set bit 28 to start automatic read DQS gating calibration */
	setbits_le32(&mmdc0->mpdgctrl0, 5 << 28);

	/* Poll for completion.  MPDGCTRL0[HW_DG_EN] should be 0 */
	wait_for_bit_le32(&mmdc0->mpdgctrl0, 1 << 28, 0, 100, 0);

	/*
	 * Check to see if any errors were encountered during calibration
	 * (check MPDGCTRL0[HW_DG_ERR]).
	 * Check both PHYs for x64 configuration, if x32, check only PHY0.
	 */
	if (readl(&mmdc0->mpdgctrl0) & 0x00001000)
		errors |= 1;

	if ((sysinfo->dsize == 0x2) && (readl(&mmdc1->mpdgctrl0) & 0x00001000))
		errors |= 2;

	/* now disable mpdgctrl0[DG_CMP_CYC] */
	clrbits_le32(&mmdc0->mpdgctrl0, 1 << 30);
	if (sysinfo->dsize == 2)
		clrbits_le32(&mmdc1->mpdgctrl0, 1 << 30);

	/*
	 * DQS gating absolute offset should be modified from
	 * reflecting (HW_DG_LOWx + HW_DG_UPx)/2 to
	 * reflecting (HW_DG_UPx - 0x80)
	 */
	modify_dg_result(&mmdc0->mpdghwst0, &mmdc0->mpdghwst1,
			 &mmdc0->mpdgctrl0);
	modify_dg_result(&mmdc0->mpdghwst2, &mmdc0->mpdghwst3,
			 &mmdc0->mpdgctrl1);
	if (sysinfo->dsize == 0x2) {
		modify_dg_result(&mmdc1->mpdghwst0, &mmdc1->mpdghwst1,
				 &mmdc1->mpdgctrl0);
		modify_dg_result(&mmdc1->mpdghwst2, &mmdc1->mpdghwst3,
				 &mmdc1->mpdgctrl1);
	}
	debug("Ending Read DQS Gating calibration. Error mask: 0x%x\n", errors);

	/*
	 * **********************
	 * Read Delay calibration
	 * **********************
	 */
	debug("Starting Read Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects.  If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 9. Read delay-line calibration
	 * Start the automatic read calibration process by asserting
	 * MPRDDLHWCTL[HW_RD_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mprddlhwctl);

	/*
	 * 10. poll for completion
	 * MMDC indicates that the read delay-line calibration has finished
	 * by setting MPRDDLHWCTL[HW_RD_DL_EN] = 0.  Also, ensure that
	 * no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mprddlhwctl, 1 << 4, 0, 100, 0);

	/* check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mprddlhwctl) & 0x0000000f)
		errors |= 4;

	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mprddlhwctl) & 0x0000000f))
		errors |= 8;

	debug("Ending Read Delay calibration. Error mask: 0x%x\n", errors);

	/*
	 * ***********************
	 * Write Delay Calibration
	 * ***********************
	 */
	debug("Starting Write Delay calibration.\n");

	reset_read_data_fifos();

	/*
	 * 4. Issue the Precharge-All command to the DDR device for both
	 * chip selects. If only using one chip select, then precharge
	 * only the desired chip select.
	 */
	precharge_all(cs0_enable, cs1_enable);

	/*
	 * 8. Set the WR_DL_ABS# bits to their default values.
	 * Both PHYs for x64 configuration, if x32, do only PHY0.
	 */
	writel(initdelay, &mmdc0->mpwrdlctl);
	if (sysinfo->dsize == 0x2)
		writel(initdelay, &mmdc1->mpwrdlctl);

	/*
	 * XXX This isn't in the manual. Force a measurement for the
	 * previous delay setup to take effect.
	 */
	force_delay_measurement(sysinfo->dsize);

	/*
	 * 9. 10. Start the automatic write calibration process
	 * by asserting MPWRDLHWCTL0[HW_WR_DL_EN].
	 */
	writel(0x00000030, &mmdc0->mpwrdlhwctl);

	/*
	 * Poll for completion.
	 * MMDC indicates that the write delay calibration has finished
	 * by setting MPWRDLHWCTL[HW_WR_DL_EN] = 0.
	 * Also, ensure that no error bits were set.
	 */
	wait_for_bit_le32(&mmdc0->mpwrdlhwctl, 1 << 4, 0, 100, 0);

	/* Check both PHYs for x64 configuration, if x32, check only PHY0 */
	if (readl(&mmdc0->mpwrdlhwctl) & 0x0000000f)
		errors |= 16;

	if ((sysinfo->dsize == 0x2) &&
	    (readl(&mmdc1->mpwrdlhwctl) & 0x0000000f))
		errors |= 32;

	debug("Ending Write Delay calibration. Error mask: 0x%x\n", errors);

	reset_read_data_fifos();

	/* Enable DDR logic power down timer */
	setbits_le32(&mmdc0->mdpdc, 0x00005500);

	/* Enable Adopt power down timer */
	clrbits_le32(&mmdc0->mapsr, 0x1);

	/* Restore the saved MDMISC value (RALAT, WALAT) */
	writel(esdmisc_val, &mmdc0->mdmisc);

	/* Clear DQS pull ups */
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs0, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs1, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs2, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs3, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs4, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs5, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs6, 0x7000);
	clrbits_le32(&mx6_ddr_iomux->dram_sdqs7, 0x7000);

	/* Re-enable SDE (chip selects) if they were set initially */
	if (cs1_enable_initial)
		/* Set SDE_1 */
		setbits_le32(&mmdc0->mdctl, 1 << 30);

	if (cs0_enable_initial)
		/* Set SDE_0 */
		setbits_le32(&mmdc0->mdctl, 1 << 31);

	/* Re-enable auto refresh */
	writel(temp_ref, &mmdc0->mdref);

	/* Clear the MDSCR (including the con_req bit) */
	writel(0x0, &mmdc0->mdscr);	/* CS0 */

	/* Poll to make sure the con_ack bit is clear */
	wait_for_bit_le32(&mmdc0->mdscr, 1 << 14, 0, 100, 0);

	/*
	 * Print out the registers that were updated as a result
	 * of the calibration process.
	 */
	debug("MMDC registers updated from calibration\n");
	debug("Read DQS gating calibration:\n");
	debug("\tMPDGCTRL0 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl0));
	debug("\tMPDGCTRL1 PHY0 = 0x%08X\n", readl(&mmdc0->mpdgctrl1));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGCTRL0 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl0));
		debug("\tMPDGCTRL1 PHY1 = 0x%08X\n", readl(&mmdc1->mpdgctrl1));
	}
	debug("Read calibration:\n");
	debug("\tMPRDDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mprddlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPRDDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mprddlctl));
	debug("Write calibration:\n");
	debug("\tMPWRDLCTL PHY0 = 0x%08X\n", readl(&mmdc0->mpwrdlctl));
	if (sysinfo->dsize == 2)
		debug("\tMPWRDLCTL PHY1 = 0x%08X\n", readl(&mmdc1->mpwrdlctl));

	/*
	 * Registers below are for debugging purposes.  These print out
	 * the upper and lower boundaries captured during
	 * read DQS gating calibration.
	 */
	debug("Status registers bounds for read DQS gating:\n");
	debug("\tMPDGHWST0 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst0));
	debug("\tMPDGHWST1 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst1));
	debug("\tMPDGHWST2 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst2));
	debug("\tMPDGHWST3 PHY0 = 0x%08x\n", readl(&mmdc0->mpdghwst3));
	if (sysinfo->dsize == 2) {
		debug("\tMPDGHWST0 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst0));
		debug("\tMPDGHWST1 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst1));
		debug("\tMPDGHWST2 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst2));
		debug("\tMPDGHWST3 PHY1 = 0x%08x\n", readl(&mmdc1->mpdghwst3));
	}

	debug("Final do_dqs_calibration error mask: 0x%x\n", errors);

	return errors;
}
#endif

#if defined(CONFIG_MX6SX)
/* Configure MX6SX mmdc iomux */
void mx6sx_dram_iocfg(unsigned width,
		      const struct mx6sx_iomux_ddr_regs *ddr,
		      const struct mx6sx_iomux_grp_regs *grp)
{
	struct mx6sx_iomux_ddr_regs *mx6_ddr_iomux;
	struct mx6sx_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sx_iomux_ddr_regs *)MX6SX_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sx_iomux_grp_regs *)MX6SX_IOM_GRP_BASE;

	/* DDR IO TYPE */
	writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
	writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);

	/* CLOCK */
	writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);

	/* ADDRESS */
	writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
	writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
	writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);

	/* Control */
	writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
	writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
	writel(ddr->dram_sdcke0, &mx6_ddr_iomux->dram_sdcke0);
	writel(ddr->dram_sdcke1, &mx6_ddr_iomux->dram_sdcke1);
	writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
	writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
	writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);

	/* Data Strobes */
	writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
	writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
	writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);
	if (width >= 32) {
		writel(ddr->dram_sdqs2, &mx6_ddr_iomux->dram_sdqs2);
		writel(ddr->dram_sdqs3, &mx6_ddr_iomux->dram_sdqs3);
	}

	/* Data */
	writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
	writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
	writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
	if (width >= 32) {
		writel(grp->grp_b2ds, &mx6_grp_iomux->grp_b2ds);
		writel(grp->grp_b3ds, &mx6_grp_iomux->grp_b3ds);
	}
	writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
	writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
	if (width >= 32) {
		writel(ddr->dram_dqm2, &mx6_ddr_iomux->dram_dqm2);
		writel(ddr->dram_dqm3, &mx6_ddr_iomux->dram_dqm3);
	}
}
#endif

#ifdef CONFIG_MX6UL
void mx6ul_dram_iocfg(unsigned width,
		      const struct mx6ul_iomux_ddr_regs *ddr,
		      const struct mx6ul_iomux_grp_regs *grp)
{
	struct mx6ul_iomux_ddr_regs *mx6_ddr_iomux;
	struct mx6ul_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6ul_iomux_ddr_regs *)MX6UL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6ul_iomux_grp_regs *)MX6UL_IOM_GRP_BASE;

	/* DDR IO TYPE */
	writel(grp->grp_ddr_type, &mx6_grp_iomux->grp_ddr_type);
	writel(grp->grp_ddrpke, &mx6_grp_iomux->grp_ddrpke);

	/* CLOCK */
	writel(ddr->dram_sdclk_0, &mx6_ddr_iomux->dram_sdclk_0);

	/* ADDRESS */
	writel(ddr->dram_cas, &mx6_ddr_iomux->dram_cas);
	writel(ddr->dram_ras, &mx6_ddr_iomux->dram_ras);
	writel(grp->grp_addds, &mx6_grp_iomux->grp_addds);

	/* Control */
	writel(ddr->dram_reset, &mx6_ddr_iomux->dram_reset);
	writel(ddr->dram_sdba2, &mx6_ddr_iomux->dram_sdba2);
	writel(ddr->dram_odt0, &mx6_ddr_iomux->dram_odt0);
	writel(ddr->dram_odt1, &mx6_ddr_iomux->dram_odt1);
	writel(grp->grp_ctlds, &mx6_grp_iomux->grp_ctlds);

	/* Data Strobes */
	writel(grp->grp_ddrmode_ctl, &mx6_grp_iomux->grp_ddrmode_ctl);
	writel(ddr->dram_sdqs0, &mx6_ddr_iomux->dram_sdqs0);
	writel(ddr->dram_sdqs1, &mx6_ddr_iomux->dram_sdqs1);

	/* Data */
	writel(grp->grp_ddrmode, &mx6_grp_iomux->grp_ddrmode);
	writel(grp->grp_b0ds, &mx6_grp_iomux->grp_b0ds);
	writel(grp->grp_b1ds, &mx6_grp_iomux->grp_b1ds);
	writel(ddr->dram_dqm0, &mx6_ddr_iomux->dram_dqm0);
	writel(ddr->dram_dqm1, &mx6_ddr_iomux->dram_dqm1);
}
#endif

#if defined(CONFIG_MX6SL)
void mx6sl_dram_iocfg(unsigned width,
		      const struct mx6sl_iomux_ddr_regs *ddr,
		      const struct mx6sl_iomux_grp_regs *grp)
{
	struct mx6sl_iomux_ddr_regs *mx6_ddr_iomux;
	struct mx6sl_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sl_iomux_ddr_regs *)MX6SL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sl_iomux_grp_regs *)MX6SL_IOM_GRP_BASE;

	/* DDR IO TYPE */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* CLOCK */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;

	/* ADDRESS */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}

	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
}
#endif

#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D)
/* Configure MX6DQ mmdc iomux */
void mx6dq_dram_iocfg(unsigned width,
		      const struct mx6dq_iomux_ddr_regs *ddr,
		      const struct mx6dq_iomux_grp_regs *grp)
{
	volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux;
	volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE;

	/* DDR IO Type */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* Clock */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
	mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;

	/* Address */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
	mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
	mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
		mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
		mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
		mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}
	if (width >= 64) {
		mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
		mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
		mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
		mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
	}
	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
		mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
		mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
		mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
	}
}
#endif

#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S)
/* Configure MX6SDL mmdc iomux */
void mx6sdl_dram_iocfg(unsigned width,
		       const struct mx6sdl_iomux_ddr_regs *ddr,
		       const struct mx6sdl_iomux_grp_regs *grp)
{
	volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux;
	volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux;

	mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE;
	mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE;

	/* DDR IO Type */
	mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
	mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;

	/* Clock */
	mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
	mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;

	/* Address */
	mx6_ddr_iomux->dram_cas = ddr->dram_cas;
	mx6_ddr_iomux->dram_ras = ddr->dram_ras;
	mx6_grp_iomux->grp_addds = grp->grp_addds;

	/* Control */
	mx6_ddr_iomux->dram_reset = ddr->dram_reset;
	mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
	mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
	mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
	mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
	mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
	mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;

	/* Data Strobes */
	mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
	mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
	mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
		mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
		mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
		mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
		mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
	}

	/* Data */
	mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
	mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
	mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
	if (width >= 32) {
		mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
		mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
	}
	if (width >= 64) {
		mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
		mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
		mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
		mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
	}
	mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
	mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
	if (width >= 32) {
		mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
		mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
	}
	if (width >= 64) {
		mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
		mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
		mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
		mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
	}
}
#endif

/*
 * Configure mx6 mmdc registers based on:
 *  - board-specific memory configuration
 *  - board-specific calibration data
 *  - ddr3/lpddr2 chip details
 *
 * The various calculations here are derived from the Freescale
 * 1. i.MX6DQSDL DDR3 Script Aid spreadsheet (DOC-94917) designed to generate
 *    MMDC configuration registers based on memory system and memory chip
 *    parameters.
 *
 * 2. i.MX6SL LPDDR2 Script Aid spreadsheet V0.04 designed to generate MMDC
 *    configuration registers based on memory system and memory chip
 *    parameters.
 *
 * The defaults here are those which were specified in the spreadsheet.
 * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM
 * and/or IMX6SLRM section titled MMDC initialization.
 */
#define MR(val, ba, cmd, cs1) \
	((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba)
#define MMDC1(entry, value) do {					  \
	if (!is_mx6sx() && !is_mx6ul() && !is_mx6sl())			  \
		mmdc1->entry = value;					  \
	} while (0)
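/*
 * Example (values purely illustrative): MR(0x0008, 0x1, 0x3, 0) expands to
 * (0x0008 << 16) | (1 << 15) | (0x3 << 4) | (0 << 3) | 0x1 = 0x00088031,
 * i.e. an MDSCR command word with the MR value in bits [31:16], CON_REQ
 * set, CMD = 0x3 (load mode register), CS0 selected and bank/MR address
 * 0x1 in bits [2:0].
 */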

/*
 * According to JESD209-2B-LPDDR2: Table 103
 * WL: write latency
 */
static int lpddr2_wl(uint32_t mem_speed)
{
	switch (mem_speed) {
	case 1066:
	case 933:
		return 4;
	case 800:
		return 3;
	case 677:
	case 533:
		return 2;
	case 400:
	case 333:
		return 1;
	default:
		puts("invalid memory speed\n");
		hang();
	}

	return 0;
}

/*
 * According to JESD209-2B-LPDDR2: Table 103
 * RL: read latency
 */
static int lpddr2_rl(uint32_t mem_speed)
{
	switch (mem_speed) {
	case 1066:
		return 8;
	case 933:
		return 7;
	case 800:
		return 6;
	case 677:
		return 5;
	case 533:
		return 4;
	case 400:
	case 333:
		return 3;
	default:
		puts("invalid memory speed\n");
		hang();
	}

	return 0;
}

void mx6_lpddr2_cfg(const struct mx6_ddr_sysinfo *sysinfo,
		    const struct mx6_mmdc_calibration *calib,
		    const struct mx6_lpddr2_cfg *lpddr2_cfg)
{
	volatile struct mmdc_p_regs *mmdc0;
	u32 val;
	u8 tcke, tcksrx, tcksre, trrd;
	u8 twl, txp, tfaw, tcl;
	u16 tras, twr, tmrd, trtp, twtr, trfc, txsr;
	u16 trcd_lp, trppb_lp, trpab_lp, trc_lp;
	u16 cs0_end;
	u8 coladdr;
	int clkper; /* clock period in picoseconds */
	int clock;  /* clock freq in MHz */
	int cs;

	/* only support 16/32 bits */
	if (sysinfo->dsize > 1)
		hang();

	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;

	clock = mxc_get_clock(MXC_DDR_CLK) / 1000000U;
	clkper = (1000 * 1000) / clock; /* pico seconds */
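	/*
	 * Illustrative numbers only: with a 400 MHz DDR clock, clock = 400
	 * and clkper = 1000000 / 400 = 2500 ps, so e.g. the 15 ns write
	 * recovery below becomes twr = DIV_ROUND_UP(15000, 2500) - 1 = 5.
	 */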

	twl = lpddr2_wl(lpddr2_cfg->mem_speed) - 1;

	/* LPDDR2-S2 and LPDDR2-S4 have the same tRFC value. */
	switch (lpddr2_cfg->density) {
	case 1:
	case 2:
	case 4:
		trfc = DIV_ROUND_UP(130000, clkper) - 1;
		txsr = DIV_ROUND_UP(140000, clkper) - 1;
		break;
	case 8:
		trfc = DIV_ROUND_UP(210000, clkper) - 1;
		txsr = DIV_ROUND_UP(220000, clkper) - 1;
		break;
	default:
		/*
		 * 64Mb, 128Mb, 256Mb, 512Mb are not supported currently.
		 */
		hang();
		break;
	}
	/*
	 * txpdll, txpr, taonpd and taofpd are not relevant in LPDDR2 mode,
	 * set them to 0. */
	txp = DIV_ROUND_UP(7500, clkper) - 1;
	tcke = 3;
	if (lpddr2_cfg->mem_speed == 333)
		tfaw = DIV_ROUND_UP(60000, clkper) - 1;
	else
		tfaw = DIV_ROUND_UP(50000, clkper) - 1;
	trrd = DIV_ROUND_UP(10000, clkper) - 1;

	/* tckesr for LPDDR2 */
	tcksre = DIV_ROUND_UP(15000, clkper);
	tcksrx = tcksre;
	twr  = DIV_ROUND_UP(15000, clkper) - 1;
	/*
	 * tMRR: 2, tMRW: 5
	 * tMRD should be set to max(tMRR, tMRW)
	 */
	tmrd = 5;
	tras = DIV_ROUND_UP(lpddr2_cfg->trasmin, clkper / 10) - 1;
	/* LPDDR2 mode uses the tRCD_LP field in MDCFG3. */
	trcd_lp = DIV_ROUND_UP(lpddr2_cfg->trcd_lp, clkper / 10) - 1;
	trc_lp = DIV_ROUND_UP(lpddr2_cfg->trasmin + lpddr2_cfg->trppb_lp,
			      clkper / 10) - 1;
	trppb_lp = DIV_ROUND_UP(lpddr2_cfg->trppb_lp, clkper / 10) - 1;
	trpab_lp = DIV_ROUND_UP(lpddr2_cfg->trpab_lp, clkper / 10) - 1;
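	/*
	 * Note: the trasmin/trcd_lp/trppb_lp/trpab_lp inputs above appear
	 * to be expressed in hundredths of a ns (10 ps units), hence the
	 * division by clkper / 10 to convert them to clock cycles.
	 */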
	/* For LPDDR2, CL in MDCFG0 refers to RL */
	tcl = lpddr2_rl(lpddr2_cfg->mem_speed) - 3;
	twtr = DIV_ROUND_UP(7500, clkper) - 1;
	trtp = DIV_ROUND_UP(7500, clkper) - 1;

	cs0_end = 4 * sysinfo->cs_density - 1;

	debug("density:%d Gb (%d Gb per chip)\n",
	      sysinfo->cs_density, lpddr2_cfg->density);
	debug("clock: %dMHz (%d ps)\n", clock, clkper);
	debug("memspd:%d\n", lpddr2_cfg->mem_speed);
	debug("trcd_lp=%d\n", trcd_lp);
	debug("trppb_lp=%d\n", trppb_lp);
	debug("trpab_lp=%d\n", trpab_lp);
	debug("trc_lp=%d\n", trc_lp);
	debug("tcke=%d\n", tcke);
	debug("tcksrx=%d\n", tcksrx);
	debug("tcksre=%d\n", tcksre);
	debug("trfc=%d\n", trfc);
	debug("txsr=%d\n", txsr);
	debug("txp=%d\n", txp);
	debug("tfaw=%d\n", tfaw);
	debug("tcl=%d\n", tcl);
	debug("tras=%d\n", tras);
	debug("twr=%d\n", twr);
	debug("tmrd=%d\n", tmrd);
	debug("twl=%d\n", twl);
	debug("trtp=%d\n", trtp);
	debug("twtr=%d\n", twtr);
	debug("trrd=%d\n", trrd);
	debug("cs0_end=%d\n", cs0_end);
	debug("ncs=%d\n", sysinfo->ncs);

	/*
	 * board-specific configuration:
	 *  These values are determined empirically and vary per board layout
	 */
	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
	mmdc0->mprddlctl = calib->p0_mprddlctl;
	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
	mmdc0->mpzqlp2ctl = calib->mpzqlp2ctl;

	/* Read data DQ Byte0-3 delay */
	mmdc0->mprddqby0dl = 0x33333333;
	mmdc0->mprddqby1dl = 0x33333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mprddqby2dl = 0x33333333;
		mmdc0->mprddqby3dl = 0x33333333;
	}

	/* Write data DQ Byte0-3 delay */
	mmdc0->mpwrdqby0dl = 0xf3333333;
	mmdc0->mpwrdqby1dl = 0xf3333333;
	if (sysinfo->dsize > 0) {
		mmdc0->mpwrdqby2dl = 0xf3333333;
		mmdc0->mpwrdqby3dl = 0xf3333333;
	}

	/*
	 * In LPDDR2 mode this register should be cleared,
	 * so no termination will be activated.
	 */
	mmdc0->mpodtctrl = 0;

	/* complete calibration */
	val = (1 << 11); /* Force measurement on delay-lines */
	mmdc0->mpmur0 = val;

	/* Step 1: configuration request */
	mmdc0->mdscr = (u32)(1 << 15); /* config request */

	/* Step 2: Timing configuration */
	mmdc0->mdcfg0 = (trfc << 24) | (txsr << 16) | (txp << 13) |
			(tfaw << 4) | tcl;
	mmdc0->mdcfg1 = (tras << 16) | (twr << 9) | (tmrd << 5) | twl;
	mmdc0->mdcfg2 = (trtp << 6) | (twtr << 3) | trrd;
	mmdc0->mdcfg3lp = (trc_lp << 16) | (trcd_lp << 8) |
			  (trppb_lp << 4) | trpab_lp;
	mmdc0->mdotc = 0;

	mmdc0->mdasp = cs0_end; /* CS addressing */
1127*4882a593Smuzhiyun 	/* Step 3: Configure DDR type */
1128*4882a593Smuzhiyun 	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
1129*4882a593Smuzhiyun 			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
1130*4882a593Smuzhiyun 			(sysinfo->ralat << 6) | (1 << 3);
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	/* Step 4: Configure delay while leaving reset */
1133*4882a593Smuzhiyun 	mmdc0->mdor = (sysinfo->sde_to_rst << 8) |
1134*4882a593Smuzhiyun 		      (sysinfo->rst_to_cke << 0);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	/* Step 5: Configure DDR physical parameters (density and burst len) */
1137*4882a593Smuzhiyun 	coladdr = lpddr2_cfg->coladdr;
1138*4882a593Smuzhiyun 	if (lpddr2_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
1139*4882a593Smuzhiyun 		coladdr += 4;
1140*4882a593Smuzhiyun 	else if (lpddr2_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
1141*4882a593Smuzhiyun 		coladdr += 1;
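	/* Column widths of 9-11 bits map linearly to 0x0-0x2 via (coladdr - 9). */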
1142*4882a593Smuzhiyun 	mmdc0->mdctl =  (lpddr2_cfg->rowaddr - 11) << 24 |	/* ROW */
1143*4882a593Smuzhiyun 			(coladdr - 9) << 20 |			/* COL */
1144*4882a593Smuzhiyun 			(0 << 19) |	/* Burst Length = 4 for LPDDR2 */
1145*4882a593Smuzhiyun 			(sysinfo->dsize << 16);	/* DDR data bus size */
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	/* Step 6: Perform ZQ calibration */
1148*4882a593Smuzhiyun 	val = 0xa1390003; /* one-time HW ZQ calib */
1149*4882a593Smuzhiyun 	mmdc0->mpzqhwctrl = val;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	/* Step 7: Enable MMDC with desired chip select */
1152*4882a593Smuzhiyun 	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
1153*4882a593Smuzhiyun 			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	/* Step 8: Write Mode Registers to Init LPDDR2 devices */
1156*4882a593Smuzhiyun 	for (cs = 0; cs < sysinfo->ncs; cs++) {
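		/*
		 * MR() (defined earlier in this file) builds the MDSCR value
		 * from the value, mode-register/bank-address selector, command
		 * code and chip select. For LPDDR2 the MR address rides in the
		 * low byte of the value and the operand in the high byte,
		 * e.g. 0x0A | (0xFF << 8) below writes 0xFF to MR10; command
		 * code 3 is used throughout this file for mode-register
		 * accesses.
		 */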
1157*4882a593Smuzhiyun 		/* MR63: reset */
1158*4882a593Smuzhiyun 		mmdc0->mdscr = MR(63, 0, 3, cs);
1159*4882a593Smuzhiyun 		/* MR10: calibration,
1160*4882a593Smuzhiyun 		 * 0xff is the calibration command issued after initialization.
1161*4882a593Smuzhiyun 		 */
1162*4882a593Smuzhiyun 		val = 0xA | (0xff << 8);
1163*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 0, 3, cs);
1164*4882a593Smuzhiyun 		/* MR1 */
1165*4882a593Smuzhiyun 		val = 0x1 | (0x82 << 8);
1166*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 0, 3, cs);
1167*4882a593Smuzhiyun 		/* MR2 */
1168*4882a593Smuzhiyun 		val = 0x2 | (0x04 << 8);
1169*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 0, 3, cs);
1170*4882a593Smuzhiyun 		/* MR3 */
1171*4882a593Smuzhiyun 		val = 0x3 | (0x02 << 8);
1172*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 0, 3, cs);
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	/* Step 10: Power down control and self-refresh */
1176*4882a593Smuzhiyun 	mmdc0->mdpdc = (tcke & 0x7) << 16 |
1177*4882a593Smuzhiyun 			5            << 12 |  /* PWDT_1: 256 cycles */
1178*4882a593Smuzhiyun 			5            <<  8 |  /* PWDT_0: 256 cycles */
1179*4882a593Smuzhiyun 			1            <<  6 |  /* BOTH_CS_PD */
1180*4882a593Smuzhiyun 			(tcksrx & 0x7) << 3 |
1181*4882a593Smuzhiyun 			(tcksre & 0x7);
1182*4882a593Smuzhiyun 	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
1185*4882a593Smuzhiyun 	val = 0xa1310003;
1186*4882a593Smuzhiyun 	mmdc0->mpzqhwctrl = val;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	/* Step 12: Configure and activate periodic refresh */
1189*4882a593Smuzhiyun 	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);
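	/*
	 * refsel selects the refresh request source and refr the number of
	 * refresh commands issued per request; see the MDREF description in
	 * the i.MX6 reference manual for the encodings.
	 */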
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	/* Step 13: Deassert config request - init complete */
1192*4882a593Smuzhiyun 	mmdc0->mdscr = 0x00000000;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	/* wait for auto-ZQ calibration to complete */
1195*4882a593Smuzhiyun 	mdelay(1);
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
mx6_ddr3_cfg(const struct mx6_ddr_sysinfo * sysinfo,const struct mx6_mmdc_calibration * calib,const struct mx6_ddr3_cfg * ddr3_cfg)1198*4882a593Smuzhiyun void mx6_ddr3_cfg(const struct mx6_ddr_sysinfo *sysinfo,
1199*4882a593Smuzhiyun 		  const struct mx6_mmdc_calibration *calib,
1200*4882a593Smuzhiyun 		  const struct mx6_ddr3_cfg *ddr3_cfg)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	volatile struct mmdc_p_regs *mmdc0;
1203*4882a593Smuzhiyun 	volatile struct mmdc_p_regs *mmdc1;
1204*4882a593Smuzhiyun 	u32 val;
1205*4882a593Smuzhiyun 	u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd;
1206*4882a593Smuzhiyun 	u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl;
1207*4882a593Smuzhiyun 	u8 todt_idle_off = 0x4; /* from DDR3 Script Aid spreadsheet */
1208*4882a593Smuzhiyun 	u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr;
1209*4882a593Smuzhiyun 	u16 cs0_end;
1210*4882a593Smuzhiyun 	u16 tdllk = 0x1ff; /* DLL locking time: 512 cycles (JEDEC DDR3) */
1211*4882a593Smuzhiyun 	u8 coladdr;
1212*4882a593Smuzhiyun 	int clkper; /* clock period in picoseconds */
1213*4882a593Smuzhiyun 	int clock; /* clock freq in MHz */
1214*4882a593Smuzhiyun 	int cs;
1215*4882a593Smuzhiyun 	u16 mem_speed = ddr3_cfg->mem_speed;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
1218*4882a593Smuzhiyun 	if (!is_mx6sx() && !is_mx6ul() && !is_mx6sl())
1219*4882a593Smuzhiyun 		mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	/* Limit mem_speed for MX6D/MX6Q */
1222*4882a593Smuzhiyun 	if (is_mx6dq() || is_mx6dqp()) {
1223*4882a593Smuzhiyun 		if (mem_speed > 1066)
1224*4882a593Smuzhiyun 			mem_speed = 1066; /* 1066 MT/s */
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 		tcwl = 4;
1227*4882a593Smuzhiyun 	}
1228*4882a593Smuzhiyun 	/* Limit mem_speed for MX6S/MX6DL */
1229*4882a593Smuzhiyun 	else {
1230*4882a593Smuzhiyun 		if (mem_speed > 800)
1231*4882a593Smuzhiyun 			mem_speed = 800;  /* 800 MT/s */
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 		tcwl = 3;
1234*4882a593Smuzhiyun 	}
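	/*
	 * tcwl is the value programmed into MDCFG1; the MR2 write below
	 * re-encodes it as (tcwl - 3), i.e. 3 corresponds to CWL = 5
	 * (DDR3-800) and 4 to CWL = 6 (DDR3-1066).
	 */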
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	clock = mem_speed / 2;
1237*4882a593Smuzhiyun 	/*
1238*4882a593Smuzhiyun 	 * Data rate of 1066 MT/s requires 533 MHz DDR3 clock, but MX6D/Q supports
1239*4882a593Smuzhiyun 	 * up to 528 MHz, so reduce the clock to fit chip specs
1240*4882a593Smuzhiyun 	 */
1241*4882a593Smuzhiyun 	if (is_mx6dq() || is_mx6dqp()) {
1242*4882a593Smuzhiyun 		if (clock > 528)
1243*4882a593Smuzhiyun 			clock = 528; /* 528 MHz */
1244*4882a593Smuzhiyun 	}
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	clkper = (1000 * 1000) / clock; /* pico seconds */
1247*4882a593Smuzhiyun 	todtlon = tcwl;
1248*4882a593Smuzhiyun 	taxpd = tcwl;
1249*4882a593Smuzhiyun 	tanpd = tcwl;
1250*4882a593Smuzhiyun 
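	/*
	 * JEDEC DDR3 minimums: tRFC is 110/160/260/350 ns for 1/2/4/8 Gb
	 * parts and tXS = tRFC + 10 ns; tXPR uses the same minimum, hence
	 * txpr = txs below.
	 */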
1251*4882a593Smuzhiyun 	switch (ddr3_cfg->density) {
1252*4882a593Smuzhiyun 	case 1: /* 1Gb per chip */
1253*4882a593Smuzhiyun 		trfc = DIV_ROUND_UP(110000, clkper) - 1;
1254*4882a593Smuzhiyun 		txs = DIV_ROUND_UP(120000, clkper) - 1;
1255*4882a593Smuzhiyun 		break;
1256*4882a593Smuzhiyun 	case 2: /* 2Gb per chip */
1257*4882a593Smuzhiyun 		trfc = DIV_ROUND_UP(160000, clkper) - 1;
1258*4882a593Smuzhiyun 		txs = DIV_ROUND_UP(170000, clkper) - 1;
1259*4882a593Smuzhiyun 		break;
1260*4882a593Smuzhiyun 	case 4: /* 4Gb per chip */
1261*4882a593Smuzhiyun 		trfc = DIV_ROUND_UP(260000, clkper) - 1;
1262*4882a593Smuzhiyun 		txs = DIV_ROUND_UP(270000, clkper) - 1;
1263*4882a593Smuzhiyun 		break;
1264*4882a593Smuzhiyun 	case 8: /* 8Gb per chip */
1265*4882a593Smuzhiyun 		trfc = DIV_ROUND_UP(350000, clkper) - 1;
1266*4882a593Smuzhiyun 		txs = DIV_ROUND_UP(360000, clkper) - 1;
1267*4882a593Smuzhiyun 		break;
1268*4882a593Smuzhiyun 	default:
1269*4882a593Smuzhiyun 		/* invalid density */
1270*4882a593Smuzhiyun 		puts("invalid chip density\n");
1271*4882a593Smuzhiyun 		hang();
1272*4882a593Smuzhiyun 		break;
1273*4882a593Smuzhiyun 	}
1274*4882a593Smuzhiyun 	txpr = txs;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	switch (mem_speed) {
1277*4882a593Smuzhiyun 	case 800:
1278*4882a593Smuzhiyun 		txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
1279*4882a593Smuzhiyun 		tcke = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
1280*4882a593Smuzhiyun 		if (ddr3_cfg->pagesz == 1) {
1281*4882a593Smuzhiyun 			tfaw = DIV_ROUND_UP(40000, clkper) - 1;
1282*4882a593Smuzhiyun 			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
1283*4882a593Smuzhiyun 		} else {
1284*4882a593Smuzhiyun 			tfaw = DIV_ROUND_UP(50000, clkper) - 1;
1285*4882a593Smuzhiyun 			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
1286*4882a593Smuzhiyun 		}
1287*4882a593Smuzhiyun 		break;
1288*4882a593Smuzhiyun 	case 1066:
1289*4882a593Smuzhiyun 		txp = DIV_ROUND_UP(max(3 * clkper, 7500), clkper) - 1;
1290*4882a593Smuzhiyun 		tcke = DIV_ROUND_UP(max(3 * clkper, 5625), clkper) - 1;
1291*4882a593Smuzhiyun 		if (ddr3_cfg->pagesz == 1) {
1292*4882a593Smuzhiyun 			tfaw = DIV_ROUND_UP(37500, clkper) - 1;
1293*4882a593Smuzhiyun 			trrd = DIV_ROUND_UP(max(4 * clkper, 7500), clkper) - 1;
1294*4882a593Smuzhiyun 		} else {
1295*4882a593Smuzhiyun 			tfaw = DIV_ROUND_UP(50000, clkper) - 1;
1296*4882a593Smuzhiyun 			trrd = DIV_ROUND_UP(max(4 * clkper, 10000), clkper) - 1;
1297*4882a593Smuzhiyun 		}
1298*4882a593Smuzhiyun 		break;
1299*4882a593Smuzhiyun 	default:
1300*4882a593Smuzhiyun 		puts("invalid memory speed\n");
1301*4882a593Smuzhiyun 		hang();
1302*4882a593Smuzhiyun 		break;
1303*4882a593Smuzhiyun 	}
1304*4882a593Smuzhiyun 	txpdll = DIV_ROUND_UP(max(10 * clkper, 24000), clkper) - 1;
1305*4882a593Smuzhiyun 	tcksre = DIV_ROUND_UP(max(5 * clkper, 10000), clkper);
1306*4882a593Smuzhiyun 	taonpd = DIV_ROUND_UP(2000, clkper) - 1;
1307*4882a593Smuzhiyun 	tcksrx = tcksre;
1308*4882a593Smuzhiyun 	taofpd = taonpd;
1309*4882a593Smuzhiyun 	twr  = DIV_ROUND_UP(15000, clkper) - 1;
1310*4882a593Smuzhiyun 	tmrd = DIV_ROUND_UP(max(12 * clkper, 15000), clkper) - 1;
1311*4882a593Smuzhiyun 	trc  = DIV_ROUND_UP(ddr3_cfg->trcmin, clkper / 10) - 1;
1312*4882a593Smuzhiyun 	tras = DIV_ROUND_UP(ddr3_cfg->trasmin, clkper / 10) - 1;
1313*4882a593Smuzhiyun 	tcl  = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 3;
1314*4882a593Smuzhiyun 	trp  = DIV_ROUND_UP(ddr3_cfg->trcd, clkper / 10) - 1;
1315*4882a593Smuzhiyun 	twtr = ROUND(max(4 * clkper, 7500) / clkper, 1) - 1;
1316*4882a593Smuzhiyun 	trcd = trp;
1317*4882a593Smuzhiyun 	trtp = twtr;
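	/*
	 * Both CL and tRP are derived from ddr3_cfg->trcd, assuming the
	 * usual DDR3 speed-bin relation tAA = tRCD = tRP. tcl holds the
	 * MMDC encoding (CL - 3); the MR0 write below converts it to the
	 * JEDEC code (CL - 4) for the CL range used here.
	 */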
1318*4882a593Smuzhiyun 	cs0_end = 4 * sysinfo->cs_density - 1;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	debug("density:%d Gb (%d Gb per chip)\n",
1321*4882a593Smuzhiyun 	      sysinfo->cs_density, ddr3_cfg->density);
1322*4882a593Smuzhiyun 	debug("clock: %dMHz (%d ps)\n", clock, clkper);
1323*4882a593Smuzhiyun 	debug("memspd:%d\n", mem_speed);
1324*4882a593Smuzhiyun 	debug("tcke=%d\n", tcke);
1325*4882a593Smuzhiyun 	debug("tcksrx=%d\n", tcksrx);
1326*4882a593Smuzhiyun 	debug("tcksre=%d\n", tcksre);
1327*4882a593Smuzhiyun 	debug("taofpd=%d\n", taofpd);
1328*4882a593Smuzhiyun 	debug("taonpd=%d\n", taonpd);
1329*4882a593Smuzhiyun 	debug("todtlon=%d\n", todtlon);
1330*4882a593Smuzhiyun 	debug("tanpd=%d\n", tanpd);
1331*4882a593Smuzhiyun 	debug("taxpd=%d\n", taxpd);
1332*4882a593Smuzhiyun 	debug("trfc=%d\n", trfc);
1333*4882a593Smuzhiyun 	debug("txs=%d\n", txs);
1334*4882a593Smuzhiyun 	debug("txp=%d\n", txp);
1335*4882a593Smuzhiyun 	debug("txpdll=%d\n", txpdll);
1336*4882a593Smuzhiyun 	debug("tfaw=%d\n", tfaw);
1337*4882a593Smuzhiyun 	debug("tcl=%d\n", tcl);
1338*4882a593Smuzhiyun 	debug("trcd=%d\n", trcd);
1339*4882a593Smuzhiyun 	debug("trp=%d\n", trp);
1340*4882a593Smuzhiyun 	debug("trc=%d\n", trc);
1341*4882a593Smuzhiyun 	debug("tras=%d\n", tras);
1342*4882a593Smuzhiyun 	debug("twr=%d\n", twr);
1343*4882a593Smuzhiyun 	debug("tmrd=%d\n", tmrd);
1344*4882a593Smuzhiyun 	debug("tcwl=%d\n", tcwl);
1345*4882a593Smuzhiyun 	debug("tdllk=%d\n", tdllk);
1346*4882a593Smuzhiyun 	debug("trtp=%d\n", trtp);
1347*4882a593Smuzhiyun 	debug("twtr=%d\n", twtr);
1348*4882a593Smuzhiyun 	debug("trrd=%d\n", trrd);
1349*4882a593Smuzhiyun 	debug("txpr=%d\n", txpr);
1350*4882a593Smuzhiyun 	debug("cs0_end=%d\n", cs0_end);
1351*4882a593Smuzhiyun 	debug("ncs=%d\n", sysinfo->ncs);
1352*4882a593Smuzhiyun 	debug("Rtt_wr=%d\n", sysinfo->rtt_wr);
1353*4882a593Smuzhiyun 	debug("Rtt_nom=%d\n", sysinfo->rtt_nom);
1354*4882a593Smuzhiyun 	debug("SRT=%d\n", ddr3_cfg->SRT);
1355*4882a593Smuzhiyun 	debug("twr=%d\n", twr);
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	/*
1358*4882a593Smuzhiyun 	 * board-specific configuration:
1359*4882a593Smuzhiyun 	 *  These values are determined empirically and vary per board layout
1360*4882a593Smuzhiyun 	 *  see the DDR calibration application note and the
1361*4882a593Smuzhiyun 	 *  DDR3 Script Aid spreadsheet.
1362*4882a593Smuzhiyun 	 */
1363*4882a593Smuzhiyun 	mmdc0->mpwldectrl0 = calib->p0_mpwldectrl0;
1364*4882a593Smuzhiyun 	mmdc0->mpwldectrl1 = calib->p0_mpwldectrl1;
1365*4882a593Smuzhiyun 	mmdc0->mpdgctrl0 = calib->p0_mpdgctrl0;
1366*4882a593Smuzhiyun 	mmdc0->mpdgctrl1 = calib->p0_mpdgctrl1;
1367*4882a593Smuzhiyun 	mmdc0->mprddlctl = calib->p0_mprddlctl;
1368*4882a593Smuzhiyun 	mmdc0->mpwrdlctl = calib->p0_mpwrdlctl;
1369*4882a593Smuzhiyun 	if (sysinfo->dsize > 1) {
1370*4882a593Smuzhiyun 		MMDC1(mpwldectrl0, calib->p1_mpwldectrl0);
1371*4882a593Smuzhiyun 		MMDC1(mpwldectrl1, calib->p1_mpwldectrl1);
1372*4882a593Smuzhiyun 		MMDC1(mpdgctrl0, calib->p1_mpdgctrl0);
1373*4882a593Smuzhiyun 		MMDC1(mpdgctrl1, calib->p1_mpdgctrl1);
1374*4882a593Smuzhiyun 		MMDC1(mprddlctl, calib->p1_mprddlctl);
1375*4882a593Smuzhiyun 		MMDC1(mpwrdlctl, calib->p1_mpwrdlctl);
1376*4882a593Smuzhiyun 	}
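	/*
	 * dsize encodes the bus width (0: 16-bit, 1: 32-bit, 2: 64-bit);
	 * the second controller port (MMDC1) is only programmed when a
	 * 64-bit bus is used.
	 */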
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	/* Read data DQ Byte0-3 delay */
1379*4882a593Smuzhiyun 	mmdc0->mprddqby0dl = 0x33333333;
1380*4882a593Smuzhiyun 	mmdc0->mprddqby1dl = 0x33333333;
1381*4882a593Smuzhiyun 	if (sysinfo->dsize > 0) {
1382*4882a593Smuzhiyun 		mmdc0->mprddqby2dl = 0x33333333;
1383*4882a593Smuzhiyun 		mmdc0->mprddqby3dl = 0x33333333;
1384*4882a593Smuzhiyun 	}
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	if (sysinfo->dsize > 1) {
1387*4882a593Smuzhiyun 		MMDC1(mprddqby0dl, 0x33333333);
1388*4882a593Smuzhiyun 		MMDC1(mprddqby1dl, 0x33333333);
1389*4882a593Smuzhiyun 		MMDC1(mprddqby2dl, 0x33333333);
1390*4882a593Smuzhiyun 		MMDC1(mprddqby3dl, 0x33333333);
1391*4882a593Smuzhiyun 	}
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	/* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */
1394*4882a593Smuzhiyun 	val = (sysinfo->rtt_nom == 2) ? 0x00011117 : 0x00022227;
1395*4882a593Smuzhiyun 	mmdc0->mpodtctrl = val;
1396*4882a593Smuzhiyun 	if (sysinfo->dsize > 1)
1397*4882a593Smuzhiyun 		MMDC1(mpodtctrl, val);
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	/* complete calibration */
1400*4882a593Smuzhiyun 	val = (1 << 11); /* Force measurement on delay-lines */
1401*4882a593Smuzhiyun 	mmdc0->mpmur0 = val;
1402*4882a593Smuzhiyun 	if (sysinfo->dsize > 1)
1403*4882a593Smuzhiyun 		MMDC1(mpmur0, val);
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	/* Step 1: configuration request */
1406*4882a593Smuzhiyun 	mmdc0->mdscr = (u32)(1 << 15); /* config request */
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	/* Step 2: Timing configuration */
1409*4882a593Smuzhiyun 	mmdc0->mdcfg0 = (trfc << 24) | (txs << 16) | (txp << 13) |
1410*4882a593Smuzhiyun 			(txpdll << 9) | (tfaw << 4) | tcl;
1411*4882a593Smuzhiyun 	mmdc0->mdcfg1 = (trcd << 29) | (trp << 26) | (trc << 21) |
1412*4882a593Smuzhiyun 			(tras << 16) | (1 << 15) /* trpa */ |
1413*4882a593Smuzhiyun 			(twr << 9) | (tmrd << 5) | tcwl;
1414*4882a593Smuzhiyun 	mmdc0->mdcfg2 = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd;
1415*4882a593Smuzhiyun 	mmdc0->mdotc = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) |
1416*4882a593Smuzhiyun 		       (taxpd << 16) | (todtlon << 12) | (todt_idle_off << 4);
1417*4882a593Smuzhiyun 	mmdc0->mdasp = cs0_end; /* CS addressing */
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	/* Step 3: Configure DDR type */
1420*4882a593Smuzhiyun 	mmdc0->mdmisc = (sysinfo->cs1_mirror << 19) | (sysinfo->walat << 16) |
1421*4882a593Smuzhiyun 			(sysinfo->bi_on << 12) | (sysinfo->mif3_mode << 9) |
1422*4882a593Smuzhiyun 			(sysinfo->ralat << 6);
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	/* Step 4: Configure delay while leaving reset */
1425*4882a593Smuzhiyun 	mmdc0->mdor = (txpr << 16) | (sysinfo->sde_to_rst << 8) |
1426*4882a593Smuzhiyun 		      (sysinfo->rst_to_cke << 0);
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	/* Step 5: Configure DDR physical parameters (density and burst len) */
1429*4882a593Smuzhiyun 	coladdr = ddr3_cfg->coladdr;
1430*4882a593Smuzhiyun 	if (ddr3_cfg->coladdr == 8)		/* 8-bit COL is 0x3 */
1431*4882a593Smuzhiyun 		coladdr += 4;
1432*4882a593Smuzhiyun 	else if (ddr3_cfg->coladdr == 12)	/* 12-bit COL is 0x4 */
1433*4882a593Smuzhiyun 		coladdr += 1;
1434*4882a593Smuzhiyun 	mmdc0->mdctl =  (ddr3_cfg->rowaddr - 11) << 24 |	/* ROW */
1435*4882a593Smuzhiyun 			(coladdr - 9) << 20 |			/* COL */
1436*4882a593Smuzhiyun 			(1 << 19) |		/* Burst Length = 8 for DDR3 */
1437*4882a593Smuzhiyun 			(sysinfo->dsize << 16);		/* DDR data bus size */
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	/* Step 6: Perform ZQ calibration */
1440*4882a593Smuzhiyun 	val = 0xa1390001; /* one-time HW ZQ calib */
1441*4882a593Smuzhiyun 	mmdc0->mpzqhwctrl = val;
1442*4882a593Smuzhiyun 	if (sysinfo->dsize > 1)
1443*4882a593Smuzhiyun 		MMDC1(mpzqhwctrl, val);
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun 	/* Step 7: Enable MMDC with desired chip select */
1446*4882a593Smuzhiyun 	mmdc0->mdctl |= (1 << 31) |			     /* SDE_0 for CS0 */
1447*4882a593Smuzhiyun 			((sysinfo->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	/* Step 8: Write Mode Registers to Init DDR3 devices */
1450*4882a593Smuzhiyun 	for (cs = 0; cs < sysinfo->ncs; cs++) {
1451*4882a593Smuzhiyun 		/* MR2 */
1452*4882a593Smuzhiyun 		val = (sysinfo->rtt_wr & 3) << 9 | (ddr3_cfg->SRT & 1) << 7 |
1453*4882a593Smuzhiyun 		      ((tcwl - 3) & 3) << 3;
1454*4882a593Smuzhiyun 		debug("MR2 CS%d: 0x%08x\n", cs, (u32)MR(val, 2, 3, cs));
1455*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 2, 3, cs);
1456*4882a593Smuzhiyun 		/* MR3 */
1457*4882a593Smuzhiyun 		debug("MR3 CS%d: 0x%08x\n", cs, (u32)MR(0, 3, 3, cs));
1458*4882a593Smuzhiyun 		mmdc0->mdscr = MR(0, 3, 3, cs);
1459*4882a593Smuzhiyun 		/* MR1 */
1460*4882a593Smuzhiyun 		val = ((sysinfo->rtt_nom & 1) ? 1 : 0) << 2 |
1461*4882a593Smuzhiyun 		      ((sysinfo->rtt_nom & 2) ? 1 : 0) << 6;
1462*4882a593Smuzhiyun 		debug("MR1 CS%d: 0x%08x\n", cs, (u32)MR(val, 1, 3, cs));
1463*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 1, 3, cs);
1464*4882a593Smuzhiyun 		/* MR0 */
1465*4882a593Smuzhiyun 		val = ((tcl - 1) << 4) |	/* CAS */
1466*4882a593Smuzhiyun 		      (1 << 8)   |		/* DLL Reset */
1467*4882a593Smuzhiyun 		      ((twr - 3) << 9) |	/* Write Recovery */
1468*4882a593Smuzhiyun 		      (sysinfo->pd_fast_exit << 12); /* Precharge PD PLL on */
1469*4882a593Smuzhiyun 		debug("MR0 CS%d: 0x%08x\n", cs, (u32)MR(val, 0, 3, cs));
1470*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 0, 3, cs);
1471*4882a593Smuzhiyun 		/* ZQ calibration */
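		/*
		 * Command code 4 issues the ZQ calibration command; per
		 * JEDEC DDR3, address bit A10 (bit 10 here) high selects
		 * the long ZQCL form used at initialization.
		 */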
1472*4882a593Smuzhiyun 		val = (1 << 10);
1473*4882a593Smuzhiyun 		mmdc0->mdscr = MR(val, 0, 4, cs);
1474*4882a593Smuzhiyun 	}
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	/* Step 10: Power down control and self-refresh */
1477*4882a593Smuzhiyun 	mmdc0->mdpdc = (tcke & 0x7) << 16 |
1478*4882a593Smuzhiyun 			5            << 12 |  /* PWDT_1: 256 cycles */
1479*4882a593Smuzhiyun 			5            <<  8 |  /* PWDT_0: 256 cycles */
1480*4882a593Smuzhiyun 			1            <<  6 |  /* BOTH_CS_PD */
1481*4882a593Smuzhiyun 			(tcksrx & 0x7) << 3 |
1482*4882a593Smuzhiyun 			(tcksre & 0x7);
1483*4882a593Smuzhiyun 	if (!sysinfo->pd_fast_exit)
1484*4882a593Smuzhiyun 		mmdc0->mdpdc |= (1 << 7); /* SLOW_PD */
1485*4882a593Smuzhiyun 	mmdc0->mapsr = 0x00001006; /* ADOPT power down enabled */
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	/* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
1488*4882a593Smuzhiyun 	val = 0xa1390003;
1489*4882a593Smuzhiyun 	mmdc0->mpzqhwctrl = val;
1490*4882a593Smuzhiyun 	if (sysinfo->dsize > 1)
1491*4882a593Smuzhiyun 		MMDC1(mpzqhwctrl, val);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	/* Step 12: Configure and activate periodic refresh */
1494*4882a593Smuzhiyun 	mmdc0->mdref = (sysinfo->refsel << 14) | (sysinfo->refr << 11);
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	/* Step 13: Deassert config request - init complete */
1497*4882a593Smuzhiyun 	mmdc0->mdscr = 0x00000000;
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	/* wait for auto-ZQ calibration to complete */
1500*4882a593Smuzhiyun 	mdelay(1);
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun 
mmdc_read_calibration(struct mx6_ddr_sysinfo const * sysinfo,struct mx6_mmdc_calibration * calib)1503*4882a593Smuzhiyun void mmdc_read_calibration(struct mx6_ddr_sysinfo const *sysinfo,
1504*4882a593Smuzhiyun                            struct mx6_mmdc_calibration *calib)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun 	struct mmdc_p_regs *mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
1507*4882a593Smuzhiyun 	struct mmdc_p_regs *mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	calib->p0_mpwldectrl0 = readl(&mmdc0->mpwldectrl0);
1510*4882a593Smuzhiyun 	calib->p0_mpwldectrl1 = readl(&mmdc0->mpwldectrl1);
1511*4882a593Smuzhiyun 	calib->p0_mpdgctrl0 = readl(&mmdc0->mpdgctrl0);
1512*4882a593Smuzhiyun 	calib->p0_mpdgctrl1 = readl(&mmdc0->mpdgctrl1);
1513*4882a593Smuzhiyun 	calib->p0_mprddlctl = readl(&mmdc0->mprddlctl);
1514*4882a593Smuzhiyun 	calib->p0_mpwrdlctl = readl(&mmdc0->mpwrdlctl);
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	if (sysinfo->dsize == 2) {
1517*4882a593Smuzhiyun 		calib->p1_mpwldectrl0 = readl(&mmdc1->mpwldectrl0);
1518*4882a593Smuzhiyun 		calib->p1_mpwldectrl1 = readl(&mmdc1->mpwldectrl1);
1519*4882a593Smuzhiyun 		calib->p1_mpdgctrl0 = readl(&mmdc1->mpdgctrl0);
1520*4882a593Smuzhiyun 		calib->p1_mpdgctrl1 = readl(&mmdc1->mpdgctrl1);
1521*4882a593Smuzhiyun 		calib->p1_mprddlctl = readl(&mmdc1->mprddlctl);
1522*4882a593Smuzhiyun 		calib->p1_mpwrdlctl = readl(&mmdc1->mpwrdlctl);
1523*4882a593Smuzhiyun 	}
1524*4882a593Smuzhiyun }
1525*4882a593Smuzhiyun 
mx6_dram_cfg(const struct mx6_ddr_sysinfo * sysinfo,const struct mx6_mmdc_calibration * calib,const void * ddr_cfg)1526*4882a593Smuzhiyun void mx6_dram_cfg(const struct mx6_ddr_sysinfo *sysinfo,
1527*4882a593Smuzhiyun 		  const struct mx6_mmdc_calibration *calib,
1528*4882a593Smuzhiyun 		  const void *ddr_cfg)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	if (sysinfo->ddr_type == DDR_TYPE_DDR3) {
1531*4882a593Smuzhiyun 		mx6_ddr3_cfg(sysinfo, calib, ddr_cfg);
1532*4882a593Smuzhiyun 	} else if (sysinfo->ddr_type == DDR_TYPE_LPDDR2) {
1533*4882a593Smuzhiyun 		mx6_lpddr2_cfg(sysinfo, calib, ddr_cfg);
1534*4882a593Smuzhiyun 	} else {
1535*4882a593Smuzhiyun 		puts("Unsupported ddr type\n");
1536*4882a593Smuzhiyun 		hang();
1537*4882a593Smuzhiyun 	}
1538*4882a593Smuzhiyun }
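/*
 * Illustrative usage sketch (not part of this file): a board's SPL fills in
 * the sysinfo, calibration and chip structures from its schematic, the DDR
 * datasheet and the calibration spreadsheet, then calls mx6_dram_cfg().
 * The field values below are placeholders, not a validated configuration.
 *
 *	static const struct mx6_ddr3_cfg example_ddr3 = {
 *		.mem_speed	= 1066,
 *		.density	= 4,
 *		.rowaddr	= 15,
 *		.coladdr	= 10,
 *		.pagesz		= 2,
 *		.trcd		= 1375,
 *		.trcmin		= 4875,
 *		.trasmin	= 3500,
 *		.SRT		= 0,
 *	};
 *
 *	static void example_spl_dram_init(void)
 *	{
 *		struct mx6_ddr_sysinfo sysinfo;
 *		struct mx6_mmdc_calibration calib;
 *
 *		...populate sysinfo (ddr_type, dsize, ncs, cs_density, ralat,
 *		refsel, refr, ...) and calib (board-specific p0_ and p1_
 *		values) here...
 *
 *		mx6_dram_cfg(&sysinfo, &calib, &example_ddr3);
 *	}
 */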
1539