/*
 * (C) Copyright 2014-2015 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 *
 * Extracted from armv8/start.S
 */

#include <config.h>
#include <linux/linkage.h>
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>

/*
 * Get GIC offset
 * For LS1043A rev1.0, the GIC base address is aligned to 4 KB.
 * For LS1043A rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT] is set,
 * the GIC base address is aligned to 4 KB; otherwise it is aligned
 * to 64 KB.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC
 */
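/*
 * A rough C-style sketch of the selection logic below, for readability
 * only: readl()/be32_to_cpu()/BIT() are used loosely for illustration,
 * and the SVR bit layout (device id in bits [31:16], revision in bits
 * [7:0] after the big-endian swap) is inferred from how the code masks
 * the register.
 *
 *	gicd = GICD_BASE;  gicc = GICC_BASE;
 *	svr  = be32_to_cpu(readl(DCFG_CCSR_SVR));
 *	if ((svr >> 16) == SVR_DEV(SVR_LS1043A) &&
 *	    (svr & 0xff) != REV1_0 &&
 *	    !(be32_to_cpu(readl(SCFG_GIC400_ALIGN)) & BIT(GIC_ADDR_BIT))) {
 *		gicd = GICD_BASE_64K;  gicc = GICC_BASE_64K;
 *	}
 */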
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2
	lsr	w3, w2, #16
	ldr	w4, =SVR_DEV(SVR_LS1043A)
	cmp	w3, w4
	b.ne	1f
	ands	w2, w2, #0xff
	cmp	w2, #REV1_0
	b.eq	1f
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2
	tbnz	w2, #GIC_ADDR_BIT, 1f
	ldr	x0, =GICD_BASE_64K
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)

ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)


ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:

#ifdef CONFIG_FSL_LSCH3

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RN-I 6 and RN-I 20.
	 * This is required as a performance optimization on LS2088A.
	 * The LS2080A family does not support setting forced-order mode,
	 * so skip this operation for the LS2080A family.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/* Set all RN-I ports to QoS of 15 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16  /* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

100:
	branch_if_master x0, x1, 2f

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	mov	w0, #1 << 3	/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080A and its personalities do not support TZASC,
	 * so skip the TZASC-related operations.
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV(SVR_LS2080A)
	cmp	w0, w1
	b.eq	1f

	/* Set up the TZASC so that:
	 * a. only Region 0 is used, with global secure write/read enabled
	 * b. only Region 0 is used, with NSAID write/read enabled
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */
#ifdef CONFIG_FSL_TZASC_1
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
#ifdef CONFIG_FSL_TZASC_2
	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
	isb
	dsb	sy
#endif
100:
1:
#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
	/* Initialize the L2 RAM latency */
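	/*
	 * A note on the constants used below (assuming S3_1_c11_c0_2 is
	 * the core's L2CTLR_EL1, as on Cortex-A72-class CPUs): the mask
	 * 0x1C7 = 0b1_1100_0111 covers bits [2:0] (L2 data RAM latency)
	 * and bits [8:6] (L2 tag RAM latency); 0x2 then programs the
	 * data-RAM latency field and 0x80 (bit 7) the tag-RAM latency
	 * field.
	 */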
	mrs	x1, S3_1_c11_c0_2
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 data ram latency bits [2:0] */
	orr	x1, x1, #0x2
	/* Set L2 tag ram latency bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
100:
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)

ENTRY(fsl_clear_ocram)
/* Clear OCRAM */
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
	mov	x2, #0
clear_loop:
	str	x2, [x0]
	add	x0, x0, #8
	cmp	x0, x1
	b.lo	clear_loop
	ret
ENDPROC(fsl_clear_ocram)

ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
ENDPROC(fsl_ocram_clear_ecc_err)
#endif

#ifdef CONFIG_FSL_LSCH3
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret

hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
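	/*
	 * A sketch of what the loop below does: the status register of
	 * HN-F node n is polled at 0x04200018 + n * 0x10000 (built from
	 * the mov/movk pair plus the 0x10000 per-node stride), for all
	 * 8 nodes. The 1200-tick budget matches the "100 microseconds"
	 * mentioned below only if the system counter runs at roughly
	 * 12 MHz; treat that equivalence as an assumption.
	 */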
	mov	x1, x0
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7
	ret

hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
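	/*
	 * As with the poll routine above, the request register of HN-F
	 * node n is taken to live at 0x04200010 + n * 0x10000 (derived
	 * from the mov/movk pair and the 0x10000 stride); the low two
	 * bits of that register carry the requested power state, which
	 * is why the mask below clears only bits [1:0] before OR-ing in
	 * the new state.
	 */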
	mov	x1, x0
	/* write the requested power state to every HN-F node */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set the requested pstate */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc	/* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret

ENTRY(__asm_flush_l3_dcache)
	/*
	 * Return status in x0
	 *    success 0
	 *    timeout 1 for setting SFONLY, 2 for FAM, 3 for both
	 */
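	/*
	 * How to read the sequence below: every HN-F node is first asked
	 * to enter power state 0x1 (HNFPSTAT_SFONLY) and polled for
	 * status 0x4, which is the step that pushes the L3 contents out;
	 * the nodes are then asked to return to 0x3 (HNFPSTAT_FAM) and
	 * polled for status 0xc. Timeouts from the two polls accumulate
	 * in x8 as the 1/2 bits of the return code. Reading SFONLY as
	 * "snoop-filter only" is an assumption based on CCN interconnect
	 * naming, not something stated in this file.
	 */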
	mov	x29, lr
	mov	x8, #0

	switch_el x0, 1f, 100f, 100f	/* skip if not in EL3 */

1:
	dsb	sy
	mov	x0, #0x1		/* HNFPSTAT_SFONLY */
	bl	hnf_set_pstate

	mov	x0, #0x4		/* SFONLY status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	mov	x8, #1			/* timeout */
1:
	dsb	sy
	mov	x0, #0x3		/* HNFPSTAT_FAM */
	bl	hnf_set_pstate

	mov	x0, #0xc		/* FAM status */
	bl	hnf_pstate_poll
	cbz	x0, 1f
	add	x8, x8, #0x2
100:
1:
	mov	x0, x8
	mov	lr, x29
	ret
ENDPROC(__asm_flush_l3_dcache)
#endif

#ifdef CONFIG_MP
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	.align 2
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = (MPIDR[15:8] << 2) | MPIDR[1:0]
	 */
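	/*
	 * Worked example (hypothetical core, for illustration only):
	 * cluster 1, core 1 gives AFF1_CLUSTERID = 1 and AFF0_CPUID = 1,
	 * so LPID = (1 << 2) | 1 = 5.
	 */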
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
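	/*
	 * Continuing the example above: LPID 5 selects the element at
	 * offset 5 * 64 = 0x140 from __spin_table.
	 */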
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0
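	/*
	 * Spin-table element layout as used in this routine (byte
	 * offsets): 0 holds the entry address this core spins on, 8 the
	 * STATUS word it sets, 16 the LPID value it records, and 24 a
	 * flag that, when non-zero, selects the AArch32 entry path
	 * below. The field names follow the loads/stores in this file;
	 * only STATUS and LPID appear literally in the comments.
	 */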

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0	/* set with real frequency */
	str	x9, [x11, #16]	/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]	/* STATUS */
	dsb	sy
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1
	gic_wait_for_interrupt_m x0, w1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]
	cbz	x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]
	cbz	x5, 1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

ENDPROC(secondary_boot_func)

ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)

ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15
	ubfm	x2, x0, #0, #1
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpu's spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]

	ldr	x5, [x11, #24]
	cbz	x5, 2f

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1

2:	ldr	x5, =ES_TO_AARCH64

switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)

	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region).
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
#endif