/*
 * Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <fsl_ifc.h>
#include <ahci.h>
#include <scsi.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/arch-fsl-layerscape/config.h>
#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
#include <fsl_csu.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#include <fsl_ddr.h>
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

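/*
 * Identify SoC capabilities from the SVR read out of the GUTS block:
 * whether the part has a DP-DDR controller and whether it has an AIOP.
 */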
bool soc_has_dp_ddr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A, LS2088A and LS2048A have DP_DDR */
	if ((SVR_SOC_VER(svr) == SVR_LS2085A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2088A) ||
	    (SVR_SOC_VER(svr) == SVR_LS2048A))
		return true;

	return false;
}

bool soc_has_aiop(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 svr = gur_in32(&gur->svr);

	/* LS2085A has AIOP */
	if (SVR_SOC_VER(svr) == SVR_LS2085A)
		return true;

	return false;
}

#if defined(CONFIG_FSL_LSCH3)
/*
 * This erratum requires setting a value in eddrtqcr1 to
 * optimize DDR performance.
 */
static void erratum_a008336(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008336
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#ifdef CONFIG_SYS_FSL_DCSR_DDR2_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR2_ADDR + 0x800;
	if (fsl_ddr_get_version(0) == 0x50200)
		out_le32(eddrtqcr1, 0x63b30002);
#endif
#endif
}

/*
 * This erratum requires a register write before memory
 * controller 3 is enabled.
 */
static void erratum_a008514(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008514
	u32 *eddrtqcr1;

#ifdef CONFIG_SYS_FSL_DCSR_DDR3_ADDR
	eddrtqcr1 = (void *)CONFIG_SYS_FSL_DCSR_DDR3_ADDR + 0x800;
	out_le32(eddrtqcr1, 0x63b20002);
#endif
#endif
}

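/*
 * Erratum A009635 workaround: program the EPU to generate periodic
 * wake-up events. The interval, in platform cycles (MHz), defaults to
 * the platform bus frequency and may be overridden through the
 * "a009635_interval_val" environment variable.
 */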
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
#define PLATFORM_CYCLE_ENV_VAR "a009635_interval_val"

static unsigned long get_internval_val_mhz(void)
{
	char *interval = env_get(PLATFORM_CYCLE_ENV_VAR);
	/*
	 * interval is the number of platform cycles (MHz) between
	 * wake up events generated by EPU.
	 */
	ulong interval_mhz = get_bus_freq(0) / (1000 * 1000);

	if (interval)
		interval_mhz = simple_strtoul(interval, NULL, 10);

	return interval_mhz;
}

void erratum_a009635(void)
{
	u32 val;
	unsigned long interval_mhz = get_internval_val_mhz();

	if (!interval_mhz)
		return;

	val = in_le32(DCSR_CGACRE5);
	writel(val | 0x00000200, DCSR_CGACRE5);

	val = in_le32(EPU_EPCMPR5);
	writel(interval_mhz, EPU_EPCMPR5);
	val = in_le32(EPU_EPCCR5);
	writel(val | 0x82820000, EPU_EPCCR5);
	val = in_le32(EPU_EPSMCR5);
	writel(val | 0x002f0000, EPU_EPSMCR5);
	val = in_le32(EPU_EPECR5);
	writel(val | 0x20000000, EPU_EPECR5);
	val = in_le32(EPU_EPGCR);
	writel(val | 0x80000000, EPU_EPGCR);
}
#endif /* CONFIG_SYS_FSL_ERRATUM_A009635 */

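/*
 * RCW source workaround: when booting from NAND through SPL, rewrite
 * the RCW source field in the DCSR mirror of PORCR1 so that it reads
 * back as NOR.
 */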
static void erratum_rcw_src(void)
{
#if defined(CONFIG_SPL) && defined(CONFIG_NAND_BOOT)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
	u32 __iomem *dcfg_dcsr = (u32 __iomem *)DCFG_DCSR_BASE;
	u32 val;

	val = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
	val &= ~DCFG_PORSR1_RCW_SRC;
	val |= DCFG_PORSR1_RCW_SRC_NOR;
	out_le32(dcfg_dcsr + DCFG_DCSR_PORCR1 / 4, val);
#endif
}

#define I2C_DEBUG_REG 0x6
#define I2C_GLITCH_EN 0x8
/*
 * This erratum requires setting the glitch_en bit to enable the
 * digital glitch filter and improve clock stability.
 */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
static void erratum_a009203(void)
{
	u8 __iomem *ptr;
#ifdef CONFIG_SYS_I2C
#ifdef I2C1_BASE_ADDR
	ptr = (u8 __iomem *)(I2C1_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C2_BASE_ADDR
	ptr = (u8 __iomem *)(I2C2_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C3_BASE_ADDR
	ptr = (u8 __iomem *)(I2C3_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#ifdef I2C4_BASE_ADDR
	ptr = (u8 __iomem *)(I2C4_BASE_ADDR + I2C_DEBUG_REG);

	writeb(I2C_GLITCH_EN, ptr);
#endif
#endif
}
#endif

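/*
 * Put the SMMU into bypass: set the ClientPD bit and clear the USFCFG
 * bit in both the secure and non-secure global configuration registers
 * so that all master transactions bypass translation.
 */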
void bypass_smmu(void)
{
	u32 val;

	val = (in_le32(SMMU_SCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_SCR0, val);
	val = (in_le32(SMMU_NSCR0) | SCR0_CLIENTPD_MASK) & ~(SCR0_USFCFG_MASK);
	out_le32(SMMU_NSCR0, val);
}

void fsl_lsch3_early_init_f(void)
{
	erratum_rcw_src();
	init_early_memctl_regs();	/* tighten IFC timing */
#ifdef CONFIG_SYS_FSL_ERRATUM_A009203
	erratum_a009203();
#endif
	erratum_a008514();
	erratum_a008336();
#ifdef CONFIG_CHAIN_OF_TRUST
	/*
	 * In case of Secure Boot, the IBR configures the SMMU
	 * to allow only Secure transactions.
	 * The SMMU must be reset in bypass mode:
	 * set the ClientPD bit and clear the USFCFG bit.
	 */
	if (fsl_check_boot_mode_secure() == 1)
		bypass_smmu();
#endif
}

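/*
 * Bring up both on-chip SATA controllers: program the PHY, transaction
 * and AXI configuration registers for each, then initialise AHCI on the
 * first controller and scan the SCSI bus.
 */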
#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci;

	ccsr_ahci = (void *)CONFIG_SYS_SATA2;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ccsr_ahci = (void *)CONFIG_SYS_SATA1;
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA1);
	scsi_scan(false);

	return 0;
}
#endif

#elif defined(CONFIG_FSL_LSCH2)
#ifdef CONFIG_SCSI_AHCI_PLAT
int sata_init(void)
{
	struct ccsr_ahci __iomem *ccsr_ahci = (void *)CONFIG_SYS_SATA;

	/* Disable SATA ECC */
	out_le32((void *)CONFIG_SYS_DCSR_DCFG_ADDR + 0x520, 0x80000000);
	out_le32(&ccsr_ahci->ppcfg, AHCI_PORT_PHY_1_CFG);
	out_le32(&ccsr_ahci->ptc, AHCI_PORT_TRANS_CFG);
	out_le32(&ccsr_ahci->axicc, AHCI_PORT_AXICC_CFG);

	ahci_init((void __iomem *)CONFIG_SYS_SATA);
	scsi_scan(false);

	return 0;
}
#endif

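/*
 * Erratum A009929 workaround: set the reset-request mask bit in
 * RSTRQMR1 and write the prescribed value to the debug COP CCP
 * register.
 */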
static void erratum_a009929(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009929
	struct ccsr_gur *gur = (void *)CONFIG_SYS_FSL_GUTS_ADDR;
	u32 __iomem *dcsr_cop_ccp = (void *)CONFIG_SYS_DCSR_COP_CCP_ADDR;
	u32 rstrqmr1 = gur_in32(&gur->rstrqmr1);

	rstrqmr1 |= 0x00000400;
	gur_out32(&gur->rstrqmr1, rstrqmr1);
	writel(0x01000000, dcsr_cop_ccp);
#endif
}

/*
 * This erratum requires setting a value in eddrtqcr1 to optimize
 * DDR performance. On LS1043A the eddrtqcr1 register lives in SCFG
 * space at offset 0x157_020c.
 */
#if defined(CONFIG_SYS_FSL_ERRATUM_A009660) \
	&& defined(CONFIG_SYS_FSL_ERRATUM_A008514)
#error A009660 and A008514 cannot both be enabled.
#endif

static void erratum_a009660(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009660
	u32 *eddrtqcr1 = (void *)CONFIG_SYS_FSL_SCFG_ADDR + 0x20c;
	out_be32(eddrtqcr1, 0x63b20042);
#endif
}

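/*
 * Erratum A008850 is handled in two parts: erratum_a008850_early()
 * terminates barrier transactions at the CCI-400 and disables
 * read/write re-ordering in the DDR controller, and
 * erratum_a008850_post() later restores both settings.
 */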
static void erratum_a008850_early(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 1 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)CONFIG_SYS_CCI400_ADDR;
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* disables propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_TERM_BARRIER);

	/* disable the re-ordering in DDRC */
	ddr_out32(&ddr->eor, DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
#endif
}

void erratum_a008850_post(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A008850
	/* part 2 of 2 */
	struct ccsr_cci400 __iomem *cci = (void *)CONFIG_SYS_CCI400_ADDR;
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	/* Skip if running at lower exception level */
	if (current_el() < 3)
		return;

	/* enable propagation of barrier transactions to DDRC from CCI400 */
	out_le32(&cci->ctrl_ord, CCI400_CTRLORD_EN_BARRIER);

	/* enable the re-ordering in DDRC */
	tmp = ddr_in32(&ddr->eor);
	tmp &= ~(DDR_EOR_RD_REOD_DIS | DDR_EOR_WD_REOD_DIS);
	ddr_out32(&ddr->eor, tmp);
#endif
}

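/*
 * Erratum A010315 workaround: for every PCIe controller whose SerDes
 * lanes are not configured, revoke all non-secure read/write
 * permissions.
 */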
#ifdef CONFIG_SYS_FSL_ERRATUM_A010315
void erratum_a010315(void)
{
	int i;

	for (i = PCIE1; i <= PCIE4; i++)
		if (!is_serdes_configured(i)) {
			debug("PCIe%d: disabled all R/W permission!\n", i);
			set_pcie_ns_access(i, 0);
		}
}
#endif

static void erratum_a010539(void)
{
#if defined(CONFIG_SYS_FSL_ERRATUM_A010539) && defined(CONFIG_QSPI_BOOT)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 porsr1;

	porsr1 = in_be32(&gur->porsr1);
	porsr1 &= ~FSL_CHASSIS2_CCSR_PORSR1_RCW_MASK;
	out_be32((void *)(CONFIG_SYS_DCSR_DCFG_ADDR + DCFG_DCSR_PORCR1),
		 porsr1);
#endif
}

/* Get VDD in mV from the voltage ID */
int get_core_volt_from_fuse(void)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int vdd;
	u32 fusesr;
	u8 vid;

	fusesr = in_be32(&gur->dcfg_fusesr);
	debug("%s: fusesr = 0x%x\n", __func__, fusesr);
	vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_ALTVID_SHIFT) &
		FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK;
	if ((vid == 0) || (vid == FSL_CHASSIS2_DCFG_FUSESR_ALTVID_MASK)) {
		vid = (fusesr >> FSL_CHASSIS2_DCFG_FUSESR_VID_SHIFT) &
			FSL_CHASSIS2_DCFG_FUSESR_VID_MASK;
	}
	debug("%s: VID = 0x%x\n", __func__, vid);
	switch (vid) {
	case 0x00: /* VID isn't supported */
		vdd = -EINVAL;
		debug("%s: The VID feature is not supported\n", __func__);
		break;
	case 0x08: /* 0.9V silicon */
		vdd = 900;
		break;
	case 0x10: /* 1.0V silicon */
		vdd = 1000;
		break;
	default: /* Other core voltage */
		vdd = -EINVAL;
		printf("%s: The VID(%x) isn't supported\n", __func__, vid);
		break;
	}
	debug("%s: The required minimum volt of CORE is %dmV\n", __func__, vdd);

	return vdd;
}

__weak int board_switch_core_volt(u32 vdd)
{
	return 0;
}

static int setup_core_volt(u32 vdd)
{
	return board_setup_core_volt(vdd);
}

#ifdef CONFIG_SYS_FSL_DDR
static void ddr_enable_0v9_volt(bool en)
{
	struct ccsr_ddr __iomem *ddr = (void *)CONFIG_SYS_FSL_DDR_ADDR;
	u32 tmp;

	tmp = ddr_in32(&ddr->ddr_cdr1);

	if (en)
		tmp |= DDR_CDR1_V0PT9_EN;
	else
		tmp &= ~DDR_CDR1_V0PT9_EN;

	ddr_out32(&ddr->ddr_cdr1, tmp);
}
#endif

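/*
 * Read the fused voltage ID and switch the core (and, when SerDes is
 * present, SVDD) supplies accordingly. For 0.9V parts the DDR
 * controller's 0.9V mode is also enabled.
 */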
int setup_chip_volt(void)
{
	int vdd;

	vdd = get_core_volt_from_fuse();
	/* Nothing to do for silicon that doesn't support VID */
	if (vdd < 0)
		return vdd;

	if (setup_core_volt(vdd))
		printf("%s: Switch core VDD to %dmV failed\n", __func__, vdd);
#ifdef CONFIG_SYS_HAS_SERDES
	if (setup_serdes_volt(vdd))
		printf("%s: Switch SVDD to %dmV failed\n", __func__, vdd);
#endif

#ifdef CONFIG_SYS_FSL_DDR
	if (vdd == 900)
		ddr_enable_0v9_volt(true);
#endif

	return 0;
}

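/*
 * Early SoC setup for Chassis 2 parts: enable non-secure access where
 * configured, tighten IFC timing, select the QSPI clock, make SEC and
 * SATA transactions snoopable, enable snoop and DVM requests on the
 * CCI-400 core-cluster slave interface, and apply the early errata
 * workarounds.
 */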
void fsl_lsch2_early_init_f(void)
{
	struct ccsr_cci400 *cci = (struct ccsr_cci400 *)CONFIG_SYS_CCI400_ADDR;
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

#ifdef CONFIG_LAYERSCAPE_NS_ACCESS
	enable_layerscape_ns_access();
#endif

#ifdef CONFIG_FSL_IFC
	init_early_memctl_regs();	/* tighten IFC timing */
#endif

#if defined(CONFIG_FSL_QSPI) && !defined(CONFIG_QSPI_BOOT)
	out_be32(&scfg->qspi_cfg, SCFG_QSPI_CLKSEL);
#endif
	/* Make SEC and SATA reads and writes snoopable */
	setbits_be32(&scfg->snpcnfgcr, SCFG_SNPCNFGCR_SECRDSNP |
		     SCFG_SNPCNFGCR_SECWRSNP |
		     SCFG_SNPCNFGCR_SATARDSNP |
		     SCFG_SNPCNFGCR_SATAWRSNP);

	/*
	 * Enable snoop requests and DVM message requests for
	 * slave interface S4 (A53 core cluster)
	 */
	if (current_el() == 3) {
		out_le32(&cci->slave[4].snoop_ctrl,
			 CCI400_DVM_MESSAGE_REQ_EN | CCI400_SNOOP_REQ_EN);
	}

	/* Errata workarounds */
	erratum_a008850_early();	/* part 1 of 2 */
	erratum_a009929();
	erratum_a009660();
	erratum_a010539();
}
#endif

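/*
 * Reprogram the QuadSPI look-up table so that AHB reads use 4-byte
 * addressing with a fast-read sequence. The LUT key register is probed
 * first: reading back 0x5af05af0 means the controller registers are
 * big-endian; otherwise they are treated as little-endian and the same
 * unlock/program/lock sequence is issued with little-endian accessors.
 */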
#ifdef CONFIG_QSPI_AHB_INIT
/* Enable 4-byte address support and fast read */
int qspi_ahb_init(void)
{
	u32 *qspi_lut, lut_key, *qspi_key;

	qspi_key = (void *)SYS_FSL_QSPI_ADDR + 0x300;
	qspi_lut = (void *)SYS_FSL_QSPI_ADDR + 0x310;

	lut_key = in_be32(qspi_key);

	if (lut_key == 0x5af05af0) {
		/* Registers are big-endian */
		out_be32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_be32(qspi_key + 1, 0x00000002);
		out_be32(qspi_lut, 0x0820040c);
		out_be32(qspi_lut + 1, 0x1c080c08);
		out_be32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_be32(qspi_key, 0x5af05af0);
		out_be32(qspi_key + 1, 0x00000001);
	} else {
		/* Registers are little-endian */
		out_le32(qspi_key, 0x5af05af0);
		/* Unlock the LUT table */
		out_le32(qspi_key + 1, 0x00000002);
		out_le32(qspi_lut, 0x0820040c);
		out_le32(qspi_lut + 1, 0x1c080c08);
		out_le32(qspi_lut + 2, 0x00002400);
		/* Lock the LUT table */
		out_le32(qspi_key, 0x5af05af0);
		out_le32(qspi_key + 1, 0x00000001);
	}

	return 0;
}
#endif

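/*
 * Late board init: bring up SATA, set up the chain-of-trust environment
 * and switch the QSPI AHB interface to 4-byte addressing, each step only
 * when the corresponding option is enabled.
 */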
#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
{
#ifdef CONFIG_SCSI_AHCI_PLAT
	sata_init();
#endif
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif
#ifdef CONFIG_QSPI_AHB_INIT
	qspi_ahb_init();
#endif

	return 0;
}
#endif