xref: /OK3568_Linux_fs/u-boot/arch/powerpc/cpu/mpc85xx/cpu_init.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright 2007-2011 Freescale Semiconductor, Inc.
 *
 * (C) Copyright 2003 Motorola Inc.
 * Modified by Xianghua Xiao, X.Xiao@motorola.com
 *
 * (C) Copyright 2000
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <watchdog.h>
#include <asm/processor.h>
#include <ioports.h>
#include <sata.h>
#include <fm_eth.h>
#include <asm/io.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <fsl_errata.h>
#include <asm/fsl_law.h>
#include <asm/fsl_serdes.h>
#include <asm/fsl_srio.h>
#ifdef CONFIG_FSL_CORENET
#include <asm/fsl_portals.h>
#include <asm/fsl_liodn.h>
#endif
#include <fsl_usb.h>
#include <hwconfig.h>
#include <linux/compiler.h>
#include "mp.h"
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#ifdef CONFIG_FSL_CAAM
#include <fsl_sec.h>
#endif
#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_FSL_CORENET)
#include <asm/fsl_pamu.h>
#include <fsl_secboot_err.h>
#endif
#ifdef CONFIG_SYS_QE_FMAN_FW_IN_NAND
#include <nand.h>
#include <errno.h>
#endif
#ifndef CONFIG_ARCH_QEMU_E500
#include <fsl_ddr.h>
#endif
#include "../../../../drivers/ata/fsl_sata.h"
#ifdef CONFIG_U_QE
#include <fsl_qe.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
/*
 * To derive the USB clock from the 100 MHz sysclk, the reference divisor is
 * set to 5, which gives an intermediate value of 20 (100/5).  The
 * multiplication factor integer is set to 24, which, multiplied with the
 * intermediate value, provides the clock for the USB IP.
 */
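/*
 * In other words, (100 MHz / 5) * 24 = 480 MHz is what feeds the USB IP
 * when the internal single-source clock is selected.
 */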
void usb_single_source_clk_configure(struct ccsr_usb_phy *usb_phy)
{
	sys_info_t sysinfo;

	get_sys_info(&sysinfo);
	if (sysinfo.diff_sysclk == 1) {
		clrbits_be32(&usb_phy->pllprg[1],
			     CONFIG_SYS_FSL_USB_PLLPRG2_MFI);
		setbits_be32(&usb_phy->pllprg[1],
			     CONFIG_SYS_FSL_USB_PLLPRG2_REF_DIV_INTERNAL_CLK |
			     CONFIG_SYS_FSL_USB_PLLPRG2_MFI_INTERNAL_CLK |
			     CONFIG_SYS_FSL_USB_INTERNAL_SOC_CLK_EN);
	}
}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
void fsl_erratum_a006261_workaround(struct ccsr_usb_phy __iomem *usb_phy)
{
#ifdef CONFIG_SYS_FSL_USB_DUAL_PHY_ENABLE
	u32 xcvrprg = in_be32(&usb_phy->port1.xcvrprg);

	/* Increase Disconnect Threshold by 50mV */
	xcvrprg &= ~CONFIG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_MASK |
						INC_DCNT_THRESHOLD_50MV;
	/* Enable programming of USB High speed Disconnect threshold */
	xcvrprg |= CONFIG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_EN;
	out_be32(&usb_phy->port1.xcvrprg, xcvrprg);

	xcvrprg = in_be32(&usb_phy->port2.xcvrprg);
	/* Increase Disconnect Threshold by 50mV */
	xcvrprg &= ~CONFIG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_MASK |
						INC_DCNT_THRESHOLD_50MV;
	/* Enable programming of USB High speed Disconnect threshold */
	xcvrprg |= CONFIG_SYS_FSL_USB_XCVRPRG_HS_DCNT_PROG_EN;
	out_be32(&usb_phy->port2.xcvrprg, xcvrprg);
#else

	u32 temp = 0;
	u32 status = in_be32(&usb_phy->status1);

	u32 squelch_prog_rd_0_2 =
		(status >> CONFIG_SYS_FSL_USB_SQUELCH_PROG_RD_0)
			& CONFIG_SYS_FSL_USB_SQUELCH_PROG_MASK;

	u32 squelch_prog_rd_3_5 =
		(status >> CONFIG_SYS_FSL_USB_SQUELCH_PROG_RD_3)
			& CONFIG_SYS_FSL_USB_SQUELCH_PROG_MASK;

	setbits_be32(&usb_phy->config1,
		     CONFIG_SYS_FSL_USB_HS_DISCNCT_INC);
	setbits_be32(&usb_phy->config2,
		     CONFIG_SYS_FSL_USB_RX_AUTO_CAL_RD_WR_SEL);

	temp = squelch_prog_rd_0_2 << CONFIG_SYS_FSL_USB_SQUELCH_PROG_WR_3;
	out_be32(&usb_phy->config2, in_be32(&usb_phy->config2) | temp);

	temp = squelch_prog_rd_3_5 << CONFIG_SYS_FSL_USB_SQUELCH_PROG_WR_0;
	out_be32(&usb_phy->config2, in_be32(&usb_phy->config2) | temp);
#endif
}
#endif


#if defined(CONFIG_QE) && !defined(CONFIG_U_QE)
extern qe_iop_conf_t qe_iop_conf_tab[];
extern void qe_config_iopin(u8 port, u8 pin, int dir,
				int open_drain, int assign);
extern void qe_init(uint qe_base);
extern void qe_reset(void);

static void config_qe_ioports(void)
{
	u8      port, pin;
	int     dir, open_drain, assign;
	int     i;

	for (i = 0; qe_iop_conf_tab[i].assign != QE_IOP_TAB_END; i++) {
		port		= qe_iop_conf_tab[i].port;
		pin		= qe_iop_conf_tab[i].pin;
		dir		= qe_iop_conf_tab[i].dir;
		open_drain	= qe_iop_conf_tab[i].open_drain;
		assign		= qe_iop_conf_tab[i].assign;
		qe_config_iopin(port, pin, dir, open_drain, assign);
	}
}
#endif

#ifdef CONFIG_CPM2
void config_8560_ioports (volatile ccsr_cpm_t * cpm)
{
	int portnum;

	for (portnum = 0; portnum < 4; portnum++) {
		uint pmsk = 0,
		     ppar = 0,
		     psor = 0,
		     pdir = 0,
		     podr = 0,
		     pdat = 0;
		iop_conf_t *iopc = (iop_conf_t *) & iop_conf_tab[portnum][0];
		iop_conf_t *eiopc = iopc + 32;
		uint msk = 1;

		/*
		 * NOTE:
		 * index 0 refers to pin 31,
		 * index 31 refers to pin 0
		 */
		while (iopc < eiopc) {
			if (iopc->conf) {
				pmsk |= msk;
				if (iopc->ppar)
					ppar |= msk;
				if (iopc->psor)
					psor |= msk;
				if (iopc->pdir)
					pdir |= msk;
				if (iopc->podr)
					podr |= msk;
				if (iopc->pdat)
					pdat |= msk;
			}

			msk <<= 1;
			iopc++;
		}

		if (pmsk != 0) {
			volatile ioport_t *iop = ioport_addr (cpm, portnum);
			uint tpmsk = ~pmsk;

			/*
			 * the (somewhat confused) paragraph at the
			 * bottom of page 35-5 warns that there might
			 * be "unknown behaviour" when programming
			 * PSORx and PDIRx, if PPARx = 1, so I
			 * decided this meant I had to disable the
			 * dedicated function first, and enable it
			 * last.
			 */
			iop->ppar &= tpmsk;
			iop->psor = (iop->psor & tpmsk) | psor;
			iop->podr = (iop->podr & tpmsk) | podr;
			iop->pdat = (iop->pdat & tpmsk) | pdat;
			iop->pdir = (iop->pdir & tpmsk) | pdir;
			iop->ppar |= ppar;
		}
	}
}
#endif

#ifdef CONFIG_SYS_FSL_CPC
#if defined(CONFIG_RAMBOOT_PBL) || defined(CONFIG_SYS_CPC_REINIT_F)
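/*
 * When booting via the PBL (RAMBOOT), the CPC is typically set up as SRAM
 * and mapped through a LAW at CONFIG_SYS_INIT_L3_ADDR so it can serve as
 * early RAM.  That SRAM mapping and its LAW must be torn down here before
 * the CPC can later be re-initialised as a cache.
 */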
void disable_cpc_sram(void)
{
	int i;

	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN) {
			/* find and disable LAW of SRAM */
			struct law_entry law = find_law(CONFIG_SYS_INIT_L3_ADDR);

			if (law.index == -1) {
				printf("\nFatal error happened\n");
				return;
			}
			disable_law(law.index);

			clrbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_CDQ_SPEC_DIS);
			out_be32(&cpc->cpccsr0, 0);
			out_be32(&cpc->cpcsrcr0, 0);
		}
	}
}
#endif

#if defined(T1040_TDM_QUIRK_CCSR_BASE)
#ifdef CONFIG_POST
#error POST memory test cannot be enabled with TDM
#endif
static void enable_tdm_law(void)
{
	int ret;
	char buffer[HWCONFIG_BUFFER_SIZE] = {0};
	int tdm_hwconfig_enabled = 0;

	/*
	 * Extract hwconfig from the environment, since the environment
	 * is not set up properly yet.  Search for the tdm entry in
	 * hwconfig.
	 */
	ret = env_get_f("hwconfig", buffer, sizeof(buffer));
	if (ret > 0) {
		tdm_hwconfig_enabled = hwconfig_f("tdm", buffer);
		/* If tdm is defined in hwconfig, set law for tdm workaround */
		if (tdm_hwconfig_enabled)
			set_next_law(T1040_TDM_QUIRK_CCSR_BASE, LAW_SIZE_16M,
				     LAW_TRGT_IF_CCSR);
	}
}
#endif

void enable_cpc(void)
{
	int i;
	int ret;
	u32 size = 0;
	u32 cpccfg0;
	char buffer[HWCONFIG_BUFFER_SIZE];
	char cpc_subarg[16];
	bool have_hwconfig = false;
	int cpc_args = 0;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	/* Extract hwconfig from environment */
	ret = env_get_f("hwconfig", buffer, sizeof(buffer));
	if (ret > 0) {
		/*
		 * If "en_cpc" is not defined in hwconfig, then by default all
		 * CPCs are enabled.  If it is defined, then each individual
		 * CPC that has to be enabled must also be listed,
		 * e.g. en_cpc:cpc1,cpc2;
		 */
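		/*
		 * For example, to enable only CPC1 and CPC2 one could set
		 * (hypothetical values, from the U-Boot prompt):
		 *   setenv hwconfig "en_cpc:cpc1,cpc2;"
		 *   saveenv
		 */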
		if (hwconfig_f("en_cpc", buffer))
			have_hwconfig = true;
	}

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		if (have_hwconfig) {
			sprintf(cpc_subarg, "cpc%u", i + 1);
			cpc_args = hwconfig_sub_f("en_cpc", cpc_subarg, buffer);
			if (cpc_args == 0)
				continue;
		}
		cpccfg0 = in_be32(&cpc->cpccfg0);
		size += CPC_CFG0_SZ_K(cpccfg0);

#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A002
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_TAG_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_CPC_A003
		setbits_be32(&cpc->cpchdbcr0, CPC_HDBCR0_DATA_ECC_SCRUB_DIS);
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A006593
		setbits_be32(&cpc->cpchdbcr0, 1 << (31 - 21));
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_A006379
		if (has_erratum_a006379()) {
			setbits_be32(&cpc->cpchdbcr0,
				     CPC_HDBCR0_SPLRU_LEVEL_EN);
		}
#endif

		out_be32(&cpc->cpccsr0, CPC_CSR0_CE | CPC_CSR0_PE);
		/* Read back to sync write */
		in_be32(&cpc->cpccsr0);

	}

	puts("Corenet Platform Cache: ");
	print_size(size * 1024, " enabled\n");
}

static void invalidate_cpc(void)
{
	int i;
	cpc_corenet_t *cpc = (cpc_corenet_t *)CONFIG_SYS_FSL_CPC_ADDR;

	for (i = 0; i < CONFIG_SYS_NUM_CPC; i++, cpc++) {
		/* skip the CPC when it is used entirely as SRAM */
		if (in_be32(&cpc->cpcsrcr0) & CPC_SRCR0_SRAMEN)
			continue;
		/* Flash invalidate the CPC and clear all the locks */
		out_be32(&cpc->cpccsr0, CPC_CSR0_FI | CPC_CSR0_LFC);
		while (in_be32(&cpc->cpccsr0) & (CPC_CSR0_FI | CPC_CSR0_LFC))
			;
	}
}
#else
#define enable_cpc()
#define invalidate_cpc()
#define disable_cpc_sram()
#endif /* CONFIG_SYS_FSL_CPC */

/*
 * Breathe some life into the CPU...
 *
 * Set up the memory map
 * initialize a bunch of registers
 */

#ifdef CONFIG_FSL_CORENET
static void corenet_tb_init(void)
{
	volatile ccsr_rcpm_t *rcpm =
		(void *)(CONFIG_SYS_FSL_CORENET_RCPM_ADDR);
	volatile ccsr_pic_t *pic =
		(void *)(CONFIG_SYS_MPC8xxx_PIC_ADDR);
	u32 whoami = in_be32(&pic->whoami);

	/* Enable the timebase register for this core */
	out_be32(&rcpm->ctbenrl, (1 << whoami));
}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A007212
void fsl_erratum_a007212_workaround(void)
{
	ccsr_gur_t __iomem *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	u32 ddr_pll_ratio;
	u32 __iomem *plldgdcr1 = (void *)(CONFIG_SYS_DCSRBAR + 0x21c20);
	u32 __iomem *plldadcr1 = (void *)(CONFIG_SYS_DCSRBAR + 0x21c28);
	u32 __iomem *dpdovrcr4 = (void *)(CONFIG_SYS_DCSRBAR + 0x21e80);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	u32 __iomem *plldgdcr2 = (void *)(CONFIG_SYS_DCSRBAR + 0x21c40);
	u32 __iomem *plldadcr2 = (void *)(CONFIG_SYS_DCSRBAR + 0x21c48);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	u32 __iomem *plldgdcr3 = (void *)(CONFIG_SYS_DCSRBAR + 0x21c60);
	u32 __iomem *plldadcr3 = (void *)(CONFIG_SYS_DCSRBAR + 0x21c68);
#endif
#endif
	/*
	 * Even though this workaround applies only to selected versions of
	 * the SoC, it is safe to apply it to all versions, with the
	 * limitation of odd ratios.  If the RCW has disabled the DDR PLL,
	 * we have to apply this workaround, otherwise DDR will not work.
	 */
	ddr_pll_ratio = (in_be32(&gur->rcwsr[0]) >>
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_SHIFT) &
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_MASK;
	/* check if RCW sets ratio to 0, required by this workaround */
	if (ddr_pll_ratio != 0)
		return;
	ddr_pll_ratio = (in_be32(&gur->rcwsr[0]) >>
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_RESV_SHIFT) &
		FSL_CORENET_RCWSR0_MEM_PLL_RAT_MASK;
	/* check if reserved bits have the desired ratio */
	if (ddr_pll_ratio == 0) {
		printf("Error: Unknown DDR PLL ratio!\n");
		return;
	}
	ddr_pll_ratio >>= 1;

	setbits_be32(plldadcr1, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	setbits_be32(plldadcr2, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	setbits_be32(plldadcr3, 0x02000001);
#endif
#endif
	setbits_be32(dpdovrcr4, 0xe0000000);
	out_be32(plldgdcr1, 0x08000001 | (ddr_pll_ratio << 1));
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	out_be32(plldgdcr2, 0x08000001 | (ddr_pll_ratio << 1));
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	out_be32(plldgdcr3, 0x08000001 | (ddr_pll_ratio << 1));
#endif
#endif
	udelay(100);
	clrbits_be32(plldadcr1, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 2)
	clrbits_be32(plldadcr2, 0x02000001);
#if (CONFIG_SYS_NUM_DDR_CTLRS >= 3)
	clrbits_be32(plldadcr3, 0x02000001);
#endif
#endif
	clrbits_be32(dpdovrcr4, 0xe0000000);
}
#endif

ulong cpu_init_f(void)
{
	extern void m8560_cpm_reset (void);
#ifdef CONFIG_SYS_DCSRBAR_PHYS
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif
#if defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SYS_RAMBOOT)
	struct law_entry law;
#endif
#ifdef CONFIG_ARCH_MPC8548
	ccsr_local_ecm_t *ecm = (void *)(CONFIG_SYS_MPC85xx_ECM_ADDR);
	uint svr = get_svr();

	/*
	 * CPU2 errata workaround: A core hang is possible while executing
	 * an msync instruction when a snoopable transaction from an I/O
	 * master tagged to make quick forward progress is present.
	 * Fixed in silicon rev 2.1.
	 */
	if ((SVR_MAJ(svr) == 1) || ((SVR_MAJ(svr) == 2 && SVR_MIN(svr) == 0x0)))
		out_be32(&ecm->eebpcr, in_be32(&ecm->eebpcr) | (1 << 16));
#endif

	disable_tlb(14);
	disable_tlb(15);

#if defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SYS_RAMBOOT)
	/* Disable the LAW created for NOR flash by the PBI commands */
	law = find_law(CONFIG_SYS_PBI_FLASH_BASE);
	if (law.index != -1)
		disable_law(law.index);

#if defined(CONFIG_SYS_CPC_REINIT_F)
	disable_cpc_sram();
#endif
#endif

#ifdef CONFIG_CPM2
	config_8560_ioports((ccsr_cpm_t *)CONFIG_SYS_MPC85xx_CPM_ADDR);
#endif

	init_early_memctl_regs();

#if defined(CONFIG_CPM2)
	m8560_cpm_reset();
#endif

#if defined(CONFIG_QE) && !defined(CONFIG_U_QE)
	/* Config QE ioports */
	config_qe_ioports();
#endif

#if defined(CONFIG_FSL_DMA)
	dma_init();
#endif
#ifdef CONFIG_FSL_CORENET
	corenet_tb_init();
#endif
	init_used_tlb_cams();

	/* Invalidate the CPC before DDR gets enabled */
	invalidate_cpc();

#ifdef CONFIG_SYS_DCSRBAR_PHYS
	/* set DCSRCR so that DCSR space is 1G */
	setbits_be32(&gur->dcsrcr, FSL_CORENET_DCSR_SZ_1G);
	in_be32(&gur->dcsrcr);
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A007212
	fsl_erratum_a007212_workaround();
#endif

	return 0;
}

/* Implement a dummy function for those platforms w/o SERDES */
static void __fsl_serdes__init(void)
{
	return;
}
__attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);

#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
int enable_cluster_l2(void)
{
	int i = 0;
	u32 cluster, svr = get_svr();
	ccsr_gur_t *gur = (void __iomem *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
	struct ccsr_cluster_l2 __iomem *l2cache;

	/* Only the L2 of the first cluster should be enabled on the T4080,
	 * but the hardware does not set EOC in the first cluster, so return
	 * here to skip enabling the L2 cache of the 2nd cluster.
	 */
	if (SVR_SOC_VER(svr) == SVR_T4080)
		return 0;

	cluster = in_be32(&gur->tp_cluster[i].lower);
	if (cluster & TP_CLUSTER_EOC)
		return 0;

	/* The first cache has already been set up, so skip it */
	i++;

	/* Look through the remaining clusters, and set up their caches */
	do {
		int j, cluster_valid = 0;

		l2cache = (void __iomem *)(CONFIG_SYS_FSL_CLUSTER_1_L2 + i * 0x40000);

		cluster = in_be32(&gur->tp_cluster[i].lower);

		/* check that at least one core/accel is enabled in cluster */
		for (j = 0; j < 4; j++) {
			u32 idx = (cluster >> (j*8)) & TP_CLUSTER_INIT_MASK;
			u32 type = in_be32(&gur->tp_ityp[idx]);

			if ((type & TP_ITYP_AV) &&
			    TP_ITYP_TYPE(type) == TP_ITYP_TYPE_PPC)
				cluster_valid = 1;
		}

		if (cluster_valid) {
			/* set stash ID to (cluster) * 2 + 32 + 1 */
			clrsetbits_be32(&l2cache->l2csr1, 0xff, 32 + i * 2 + 1);
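			/*
			 * e.g. cluster 1 (i = 1) is given stash ID 35 and
			 * cluster 2 stash ID 37.
			 */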

			printf("enable l2 for cluster %d %p\n", i, l2cache);

			out_be32(&l2cache->l2csr0, L2CSR0_L2FI|L2CSR0_L2LFC);
			while ((in_be32(&l2cache->l2csr0)
				& (L2CSR0_L2FI|L2CSR0_L2LFC)) != 0)
					;
			out_be32(&l2cache->l2csr0, L2CSR0_L2E|L2CSR0_L2PE|L2CSR0_L2REP_MODE);
		}
		i++;
	} while (!(cluster & TP_CLUSTER_EOC));

	return 0;
}
#endif

/*
 * Initialize L2 as cache.
 */
int l2cache_init(void)
{
	__maybe_unused u32 svr = get_svr();
#ifdef CONFIG_L2_CACHE
	ccsr_l2cache_t *l2cache = (void __iomem *)CONFIG_SYS_MPC85xx_L2_ADDR;
#elif defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	struct ccsr_cluster_l2 *l2cache = (void __iomem *)CONFIG_SYS_FSL_CLUSTER_1_L2;
#endif

	puts ("L2:    ");

#if defined(CONFIG_L2_CACHE)
	volatile uint cache_ctl;
	uint ver;
	u32 l2siz_field;

	ver = SVR_SOC_VER(svr);

	asm("msync;isync");
	cache_ctl = l2cache->l2ctl;

#if defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SYS_INIT_L2_ADDR)
	if (cache_ctl & MPC85xx_L2CTL_L2E) {
		/* Clear L2 SRAM memory-mapped base address */
		out_be32(&l2cache->l2srbar0, 0x0);
		out_be32(&l2cache->l2srbar1, 0x0);

		/* set MBECCDIS=0, SBECCDIS=0 */
		clrbits_be32(&l2cache->l2errdis,
				(MPC85xx_L2ERRDIS_MBECC |
				 MPC85xx_L2ERRDIS_SBECC));

		/* set L2E=0, L2SRAM=0 */
		clrbits_be32(&l2cache->l2ctl,
				(MPC85xx_L2CTL_L2E |
				 MPC85xx_L2CTL_L2SRAM_ENTIRE));
	}
#endif

	l2siz_field = (cache_ctl >> 28) & 0x3;

	switch (l2siz_field) {
	case 0x0:
		printf(" unknown size (0x%08x)\n", cache_ctl);
		return -1;
		break;
	case 0x1:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8555) {
			puts("128 KiB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=1 (128 KiB) */
			cache_ctl = 0xc4000000;
		} else {
			puts("256 KiB ");
			cache_ctl = 0xc0000000; /* set L2E=1, L2I=1, & L2SRAM=0 */
		}
		break;
	case 0x2:
		if (ver == SVR_8540 || ver == SVR_8560   ||
		    ver == SVR_8541 || ver == SVR_8555) {
			puts("256 KiB ");
			/* set L2E=1, L2I=1, & L2BLKSZ=2 (256 KiB) */
			cache_ctl = 0xc8000000;
		} else {
			puts("512 KiB ");
			/* set L2E=1, L2I=1, & L2SRAM=0 */
			cache_ctl = 0xc0000000;
		}
		break;
	case 0x3:
		puts("1024 KiB ");
		/* set L2E=1, L2I=1, & L2SRAM=0 */
		cache_ctl = 0xc0000000;
		break;
	}

	if (l2cache->l2ctl & MPC85xx_L2CTL_L2E) {
		puts("already enabled");
#if defined(CONFIG_SYS_INIT_L2_ADDR) && defined(CONFIG_SYS_FLASH_BASE)
		u32 l2srbar = l2cache->l2srbar0;
		if (l2cache->l2ctl & MPC85xx_L2CTL_L2SRAM_ENTIRE
				&& l2srbar >= CONFIG_SYS_FLASH_BASE) {
			l2srbar = CONFIG_SYS_INIT_L2_ADDR;
			l2cache->l2srbar0 = l2srbar;
			printf(", moving to 0x%08x", CONFIG_SYS_INIT_L2_ADDR);
		}
#endif /* CONFIG_SYS_INIT_L2_ADDR */
		puts("\n");
	} else {
		asm("msync;isync");
		l2cache->l2ctl = cache_ctl; /* invalidate & enable */
		asm("msync;isync");
		puts("enabled\n");
	}
#elif defined(CONFIG_BACKSIDE_L2_CACHE)
	if (SVR_SOC_VER(svr) == SVR_P2040) {
		puts("N/A\n");
		goto skip_l2;
	}

	u32 l2cfg0 = mfspr(SPRN_L2CFG0);

	/* invalidate the L2 cache */
	mtspr(SPRN_L2CSR0, (L2CSR0_L2FI|L2CSR0_L2LFC));
	while (mfspr(SPRN_L2CSR0) & (L2CSR0_L2FI|L2CSR0_L2LFC))
		;

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L2 (1) */
	mtspr(SPRN_L2CSR1, (32 + 1));
#endif

	/* enable the cache */
	mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);

	if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E) {
		while (!(mfspr(SPRN_L2CSR0) & L2CSR0_L2E))
			;
		print_size((l2cfg0 & 0x3fff) * 64 * 1024, " enabled\n");
	}

skip_l2:
#elif defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
	if (l2cache->l2csr0 & L2CSR0_L2E)
		print_size((l2cache->l2cfg0 & 0x3fff) * 64 * 1024,
			   " enabled\n");

	enable_cluster_l2();
#else
	puts("disabled\n");
#endif

	return 0;
}

/*
 * The newer 8548, etc, parts have twice as much cache, but
 * use the same bit-encoding as the older 8555, etc, parts.
 */
int cpu_init_r(void)
{
	__maybe_unused u32 svr = get_svr();
#ifdef CONFIG_SYS_LBC_LCRR
	fsl_lbc_t *lbc = (void __iomem *)LBC_BASE_ADDR;
#endif
#if defined(CONFIG_PPC_SPINTABLE_COMPATIBLE) && defined(CONFIG_MP)
	extern int spin_table_compat;
	const char *spin;
#endif
#ifdef CONFIG_SYS_FSL_ERRATUM_SEC_A003571
	ccsr_sec_t __iomem *sec = (void *)CONFIG_SYS_FSL_SEC_ADDR;
#endif
#if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
	defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
	/*
	 * CPU22 and NMG_CPU_A011 share the same workaround.
	 * CPU22 applies to P4080 rev 1.0, 2.0, fixed in 3.0.
	 * NMG_CPU_A011 applies to P4080 rev 1.0, 2.0 (fixed in 3.0); it
	 * also applies to P3041 rev 1.0, 1.1 and P2041 rev 1.0, 1.1, both
	 * fixed in 2.0.  NMG_CPU_A011 is activated by default and can
	 * be disabled by hwconfig with the syntax:
	 *
	 * fsl_cpu_a011:disable
	 */
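	/*
	 * For example, from the U-Boot prompt (assuming nothing else needs
	 * to be carried in hwconfig):
	 *   setenv hwconfig "fsl_cpu_a011:disable"
	 */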
	extern int enable_cpu_a011_workaround;
#ifdef CONFIG_SYS_P4080_ERRATUM_CPU22
	enable_cpu_a011_workaround = (SVR_MAJ(svr) < 3);
#else
	char buffer[HWCONFIG_BUFFER_SIZE];
	char *buf = NULL;
	int n, res;

	n = env_get_f("hwconfig", buffer, sizeof(buffer));
	if (n > 0)
		buf = buffer;

	res = hwconfig_arg_cmp_f("fsl_cpu_a011", "disable", buf);
	if (res > 0) {
		enable_cpu_a011_workaround = 0;
	} else {
		if (n >= HWCONFIG_BUFFER_SIZE) {
			printf("fsl_cpu_a011 was not found. hwconfig variable "
				"may be too long\n");
		}
		enable_cpu_a011_workaround =
			(SVR_SOC_VER(svr) == SVR_P4080 && SVR_MAJ(svr) < 3) ||
			(SVR_SOC_VER(svr) != SVR_P4080 && SVR_MAJ(svr) < 2);
	}
#endif
	if (enable_cpu_a011_workaround) {
		flush_dcache();
		mtspr(L1CSR2, (mfspr(L1CSR2) | L1CSR2_DCWS));
		sync();
	}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A007907
	flush_dcache();
	mtspr(L1CSR2, (mfspr(L1CSR2) & ~L1CSR2_DCSTASHID));
	sync();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A005812
	/*
	 * The A-005812 workaround sets bit 32 of SPR 976 for SoCs running
	 * in write shadow mode.  Check DCWS before setting SPR 976.
	 */
	if (mfspr(L1CSR2) & L1CSR2_DCWS)
		mtspr(SPRN_HDBCR0, (mfspr(SPRN_HDBCR0) | 0x80000000));
#endif

#if defined(CONFIG_PPC_SPINTABLE_COMPATIBLE) && defined(CONFIG_MP)
	spin = env_get("spin_table_compat");
	if (spin && (*spin == 'n'))
		spin_table_compat = 0;
	else
		spin_table_compat = 1;
#endif

#ifdef CONFIG_FSL_CORENET
	set_liodns();
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_portals();
#endif
#endif

	l2cache_init();
#if defined(CONFIG_RAMBOOT_PBL)
	disable_cpc_sram();
#endif
	enable_cpc();
#if defined(T1040_TDM_QUIRK_CCSR_BASE)
	enable_tdm_law();
#endif

#ifndef CONFIG_SYS_FSL_NO_SERDES
	/* needs to be in ram since code uses global static vars */
	fsl_serdes_init();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_SEC_A003571
#define MCFGR_AXIPIPE 0x000000f0
	if (IS_SVR_REV(svr, 1, 0))
		sec_clrbits32(&sec->mcfgr, MCFGR_AXIPIPE);
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A005871
	if (IS_SVR_REV(svr, 1, 0)) {
		int i;
		__be32 *p = (void __iomem *)CONFIG_SYS_DCSRBAR + 0xb004c;

		for (i = 0; i < 12; i++) {
			p += i + (i > 5 ? 11 : 0);
			out_be32(p, 0x2);
		}
		p = (void __iomem *)CONFIG_SYS_DCSRBAR + 0xb0108;
		out_be32(p, 0x34);
	}
#endif

#ifdef CONFIG_SYS_SRIO
	srio_init();
#ifdef CONFIG_SRIO_PCIE_BOOT_MASTER
	char *s = env_get("bootmaster");
	if (s) {
		if (!strcmp(s, "SRIO1")) {
			srio_boot_master(1);
			srio_boot_master_release_slave(1);
		}
		if (!strcmp(s, "SRIO2")) {
			srio_boot_master(2);
			srio_boot_master_release_slave(2);
		}
	}
#endif
#endif

#if defined(CONFIG_MP)
	setup_mp();
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_ESDHC13
	{
		if (SVR_MAJ(svr) < 3) {
			void *p;
			p = (void *)CONFIG_SYS_DCSRBAR + 0x20520;
			setbits_be32(p, 1 << (31 - 14));
		}
	}
#endif

#ifdef CONFIG_SYS_LBC_LCRR
	/*
	 * Modify the CLKDIV field of the LCRR register to improve the
	 * writing speed for NOR flash.
	 */
	clrsetbits_be32(&lbc->lcrr, LCRR_CLKDIV, CONFIG_SYS_LBC_LCRR);
	__raw_readl(&lbc->lcrr);
	isync();
#ifdef CONFIG_SYS_FSL_ERRATUM_NMG_LBC103
	udelay(100);
#endif
#endif

#ifdef CONFIG_SYS_FSL_USB1_PHY_ENABLE
	{
		struct ccsr_usb_phy __iomem *usb_phy1 =
			(void *)CONFIG_SYS_MPC85xx_USB1_PHY_ADDR;
#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
		if (has_erratum_a006261())
			fsl_erratum_a006261_workaround(usb_phy1);
#endif
		out_be32(&usb_phy1->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif
#ifdef CONFIG_SYS_FSL_USB2_PHY_ENABLE
	{
		struct ccsr_usb_phy __iomem *usb_phy2 =
			(void *)CONFIG_SYS_MPC85xx_USB2_PHY_ADDR;
#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
		if (has_erratum_a006261())
			fsl_erratum_a006261_workaround(usb_phy2);
#endif
		out_be32(&usb_phy2->usb_enable_override,
				CONFIG_SYS_FSL_USB_ENABLE_OVERRIDE);
	}
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_USB14
	/* On P204x/P304x/P50x0 Rev 1.0, a USB transmit will result in
	 * internal multi-bit ECC errors, which have an impact on performance,
	 * so software should disable all ECC reporting from USB1 and USB2.
	 */
	if (IS_SVR_REV(get_svr(), 1, 0)) {
		struct dcsr_dcfg_regs *dcfg = (struct dcsr_dcfg_regs *)
			(CONFIG_SYS_DCSRBAR + CONFIG_SYS_DCSR_DCFG_OFFSET);
		setbits_be32(&dcfg->ecccr1,
				(DCSR_DCFG_ECC_DISABLE_USB1 |
				 DCSR_DCFG_ECC_DISABLE_USB2));
	}
#endif

#if defined(CONFIG_SYS_FSL_USB_DUAL_PHY_ENABLE)
		struct ccsr_usb_phy __iomem *usb_phy =
			(void *)CONFIG_SYS_MPC85xx_USB1_PHY_ADDR;
		setbits_be32(&usb_phy->pllprg[1],
			     CONFIG_SYS_FSL_USB_PLLPRG2_PHY2_CLK_EN |
			     CONFIG_SYS_FSL_USB_PLLPRG2_PHY1_CLK_EN |
			     CONFIG_SYS_FSL_USB_PLLPRG2_MFI |
			     CONFIG_SYS_FSL_USB_PLLPRG2_PLL_EN);
#ifdef CONFIG_SYS_FSL_SINGLE_SOURCE_CLK
		usb_single_source_clk_configure(usb_phy);
#endif
		setbits_be32(&usb_phy->port1.ctrl,
			     CONFIG_SYS_FSL_USB_CTRL_PHY_EN);
		setbits_be32(&usb_phy->port1.drvvbuscfg,
			     CONFIG_SYS_FSL_USB_DRVVBUS_CR_EN);
		setbits_be32(&usb_phy->port1.pwrfltcfg,
			     CONFIG_SYS_FSL_USB_PWRFLT_CR_EN);
		setbits_be32(&usb_phy->port2.ctrl,
			     CONFIG_SYS_FSL_USB_CTRL_PHY_EN);
		setbits_be32(&usb_phy->port2.drvvbuscfg,
			     CONFIG_SYS_FSL_USB_DRVVBUS_CR_EN);
		setbits_be32(&usb_phy->port2.pwrfltcfg,
			     CONFIG_SYS_FSL_USB_PWRFLT_CR_EN);

#ifdef CONFIG_SYS_FSL_ERRATUM_A006261
		if (has_erratum_a006261())
			fsl_erratum_a006261_workaround(usb_phy);
#endif

#endif /* CONFIG_SYS_FSL_USB_DUAL_PHY_ENABLE */

#ifdef CONFIG_SYS_FSL_ERRATUM_A009942
	erratum_a009942_check_cpo();
#endif

#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif

#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_FSL_CORENET)
	if (pamu_init() < 0)
		fsl_secboot_handle_error(ERROR_ESBC_PAMU_INIT);
#endif

#ifdef CONFIG_FSL_CAAM
	sec_init();

#if defined(CONFIG_ARCH_C29X)
	if ((SVR_SOC_VER(svr) == SVR_C292) ||
	    (SVR_SOC_VER(svr) == SVR_C293))
		sec_init_idx(1);

	if (SVR_SOC_VER(svr) == SVR_C293)
		sec_init_idx(2);
#endif
#endif

#if defined(CONFIG_FSL_SATA_V2) && defined(CONFIG_SYS_FSL_ERRATUM_SATA_A001)
	/*
	 * For P1022/1013 Rev 1.0 silicon, after power-on the SATA host
	 * controller is configured in legacy mode instead of the
	 * expected enterprise mode.  Software needs to clear bit[28]
	 * of the HControl register to change from legacy mode to
	 * enterprise mode.  We assume that the controller is offline.
	 */
	if (IS_SVR_REV(svr, 1, 0) &&
	    ((SVR_SOC_VER(svr) == SVR_P1022) ||
	     (SVR_SOC_VER(svr) == SVR_P1013))) {
		fsl_sata_reg_t *reg;

		/* first SATA controller */
		reg = (void *)CONFIG_SYS_MPC85xx_SATA1_ADDR;
		clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);

		/* second SATA controller */
		reg = (void *)CONFIG_SYS_MPC85xx_SATA2_ADDR;
		clrbits_le32(&reg->hcontrol, HCONTROL_ENTERPRISE_EN);
	}
#endif

	init_used_tlb_cams();

	return 0;
}

void arch_preboot_os(void)
{
	u32 msr;

	/*
	 * We are changing interrupt offsets and are about to boot the OS, so
	 * we need to make sure we disable all async interrupts.  EE is
	 * already disabled by the time we get called.
	 */
	msr = mfmsr();
	msr &= ~(MSR_ME|MSR_CE);
	mtmsr(msr);
}

#if defined(CONFIG_SATA) && defined(CONFIG_FSL_SATA)
int sata_initialize(void)
{
	if (is_serdes_configured(SATA1) || is_serdes_configured(SATA2))
		return __sata_initialize();

	return 1;
}
#endif

void cpu_secondary_init_r(void)
{
#ifdef CONFIG_U_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00140000; /* QE immr base */
#elif defined CONFIG_QE
	uint qe_base = CONFIG_SYS_IMMR + 0x00080000; /* QE immr base */
#endif

#ifdef CONFIG_QE
	qe_init(qe_base);
	qe_reset();
#endif
}

#ifdef CONFIG_BOARD_LATE_INIT
int board_late_init(void)
{
#ifdef CONFIG_CHAIN_OF_TRUST
	fsl_setenv_chain_of_trust();
#endif

	return 0;
}
#endif