xref: /rk3399_rockchip-uboot/arch/arm/cpu/armv8/start.S (revision 3aec452e4dbd16be7bdbabfa80d1fcc840cf342c)
10ae76531SDavid Feng/*
20ae76531SDavid Feng * (C) Copyright 2013
30ae76531SDavid Feng * David Feng <fenghua@phytium.com.cn>
40ae76531SDavid Feng *
50ae76531SDavid Feng * SPDX-License-Identifier:	GPL-2.0+
60ae76531SDavid Feng */
70ae76531SDavid Feng
80ae76531SDavid Feng#include <asm-offsets.h>
90ae76531SDavid Feng#include <config.h>
100ae76531SDavid Feng#include <linux/linkage.h>
110ae76531SDavid Feng#include <asm/macro.h>
120ae76531SDavid Feng#include <asm/armv8/mmu.h>
130ae76531SDavid Feng
140ae76531SDavid Feng/*************************************************************************
150ae76531SDavid Feng *
160ae76531SDavid Feng * Startup Code (reset vector)
170ae76531SDavid Feng *
180ae76531SDavid Feng *************************************************************************/
190ae76531SDavid Feng
	/* Reset entry point: the very first instruction of the image */
200ae76531SDavid Feng.globl	_start
210ae76531SDavid Feng_start:
220ae76531SDavid Feng	b	reset
230ae76531SDavid Feng
24cdaa633fSAndre Przywara#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
25cdaa633fSAndre Przywara/*
26cdaa633fSAndre Przywara * Various SoCs need something special and SoC-specific up front in
27cdaa633fSAndre Przywara * order to boot, allow them to set that in their boot0.h file and then
28cdaa633fSAndre Przywara * use it here.
 *
 * NOTE: this expands immediately after the initial branch and before
 * the .align below -- presumably some boot ROMs expect header data at
 * this fixed offset; confirm against the SoC's boot0.h before moving.
29cdaa633fSAndre Przywara */
30cdaa633fSAndre Przywara#include <asm/arch/boot0.h>
31cdaa633fSAndre PrzywaraARM_SOC_BOOT0_HOOK
32cdaa633fSAndre Przywara#endif
33cdaa633fSAndre Przywara
340ae76531SDavid Feng	.align 3			/* 8-byte align the .quad data below */
350ae76531SDavid Feng
360ae76531SDavid Feng.globl	_TEXT_BASE
370ae76531SDavid Feng_TEXT_BASE:
	/* Link-time base address of this image */
380ae76531SDavid Feng	.quad	CONFIG_SYS_TEXT_BASE
390ae76531SDavid Feng
400ae76531SDavid Feng/*
410ae76531SDavid Feng * These are defined in the linker script.
 * Stored as offsets from _start (not absolute addresses) so the
 * values remain meaningful both before and after relocation.
420ae76531SDavid Feng */
430ae76531SDavid Feng.globl	_end_ofs
440ae76531SDavid Feng_end_ofs:
	/* offset of the image end from _start */
450ae76531SDavid Feng	.quad	_end - _start
460ae76531SDavid Feng
470ae76531SDavid Feng.globl	_bss_start_ofs
480ae76531SDavid Feng_bss_start_ofs:
	/* offset of the BSS start from _start */
490ae76531SDavid Feng	.quad	__bss_start - _start
500ae76531SDavid Feng
510ae76531SDavid Feng.globl	_bss_end_ofs
520ae76531SDavid Feng_bss_end_ofs:
	/* offset of the BSS end from _start */
530ae76531SDavid Feng	.quad	__bss_end - _start
540ae76531SDavid Feng
550ae76531SDavid Fengreset:
560e2b5350SStephen Warren	/* Allow the board to save important registers */
570e2b5350SStephen Warren	b	save_boot_params
580e2b5350SStephen Warren.globl	save_boot_params_ret
590e2b5350SStephen Warrensave_boot_params_ret:
600e2b5350SStephen Warren
6194f7ff36SSergey Temerkhanov#ifdef CONFIG_SYS_RESET_SCTRL
6294f7ff36SSergey Temerkhanov	bl reset_sctrl
6394f7ff36SSergey Temerkhanov#endif
640ae76531SDavid Feng	/*
650ae76531SDavid Feng	 * Could be EL3/EL2/EL1, Initial State:
660ae76531SDavid Feng	 * Little Endian, MMU Disabled, i/dCache Disabled
670ae76531SDavid Feng	 */
	/* Install the exception vector table for whichever EL we run at */
680ae76531SDavid Feng	adr	x0, vectors
690ae76531SDavid Feng	switch_el x1, 3f, 2f, 1f
701277bac0SDavid Feng3:	msr	vbar_el3, x0
711277bac0SDavid Feng	mrs	x0, scr_el3
72c71645adSDavid Feng	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
73c71645adSDavid Feng	msr	scr_el3, x0
740ae76531SDavid Feng	msr	cptr_el3, xzr			/* Enable FP/SIMD */
7570bcb43eSThierry Reding#ifdef COUNTER_FREQUENCY
760ae76531SDavid Feng	ldr	x0, =COUNTER_FREQUENCY
770ae76531SDavid Feng	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
7870bcb43eSThierry Reding#endif
790ae76531SDavid Feng	b	0f
800ae76531SDavid Feng2:	msr	vbar_el2, x0
810ae76531SDavid Feng	mov	x0, #0x33ff
820ae76531SDavid Feng	msr	cptr_el2, x0			/* Enable FP/SIMD */
830ae76531SDavid Feng	b	0f
840ae76531SDavid Feng1:	msr	vbar_el1, x0
850ae76531SDavid Feng	mov	x0, #3 << 20
860ae76531SDavid Feng	msr	cpacr_el1, x0			/* Enable FP/SIMD */
870ae76531SDavid Feng0:
880ae76531SDavid Feng
89*3aec452eSMingkai Hu	/*
90*3aec452eSMingkai Hu	 * Enable SMPEN bit for coherency.
91*3aec452eSMingkai Hu	 * This register is not architectural but at the moment
92*3aec452eSMingkai Hu	 * this bit should be set for A53/A57/A72.
93*3aec452eSMingkai Hu	 */
94*3aec452eSMingkai Hu#ifdef CONFIG_ARMV8_SET_SMPEN
95*3aec452eSMingkai Hu	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
96*3aec452eSMingkai Hu	orr     x0, x0, #0x40			/* SMPEN is bit 6 */
97*3aec452eSMingkai Hu	msr     S3_1_c15_c2_1, x0
98*3aec452eSMingkai Hu#endif
99*3aec452eSMingkai Hu
10037118fb2SBhupesh Sharma	/* Apply ARM core specific errata */
10137118fb2SBhupesh Sharma	bl	apply_core_errata
10237118fb2SBhupesh Sharma
1031e6ad55cSYork Sun	/*
1041e6ad55cSYork Sun	 * Cache/BPB/TLB Invalidate
1051e6ad55cSYork Sun	 * i-cache is invalidated before enabled in icache_enable()
1061e6ad55cSYork Sun	 * tlb is invalidated before mmu is enabled in dcache_enable()
1071e6ad55cSYork Sun	 * d-cache is invalidated before enabled in dcache_enable()
1081e6ad55cSYork Sun	 */
1090ae76531SDavid Feng
1100ae76531SDavid Feng	/* Processor specific initialization */
1110ae76531SDavid Feng	bl	lowlevel_init
1120ae76531SDavid Feng
1136b6024eaSMasahiro Yamada#if CONFIG_IS_ENABLED(ARMV8_SPIN_TABLE)
1146b6024eaSMasahiro Yamada	branch_if_master x0, x1, master_cpu
1156b6024eaSMasahiro Yamada	b	spin_table_secondary_jump
1166b6024eaSMasahiro Yamada	/* never return */
1176b6024eaSMasahiro Yamada#elif defined(CONFIG_ARMV8_MULTIENTRY)
1180ae76531SDavid Feng	branch_if_master x0, x1, master_cpu
1190ae76531SDavid Feng
1200ae76531SDavid Feng	/*
1210ae76531SDavid Feng	 * Slave CPUs
	 * Secondaries park here until the master publishes an entry
	 * address at CPU_RELEASE_ADDR and signals an event.
1220ae76531SDavid Feng	 */
1230ae76531SDavid Fengslave_cpu:
1240ae76531SDavid Feng	wfe
1250ae76531SDavid Feng	ldr	x1, =CPU_RELEASE_ADDR
1260ae76531SDavid Feng	ldr	x0, [x1]
1270ae76531SDavid Feng	cbz	x0, slave_cpu		/* spurious wakeup: keep waiting */
1280ae76531SDavid Feng	br	x0			/* branch to the given address */
12923b5877cSLinus Walleij#endif /* CONFIG_ARMV8_MULTIENTRY */
1306b6024eaSMasahiro Yamadamaster_cpu:
1310ae76531SDavid Feng	bl	_main
1320ae76531SDavid Feng
13394f7ff36SSergey Temerkhanov#ifdef CONFIG_SYS_RESET_SCTRL
/*
 * reset_sctrl - force SCTLR at the current EL into a known state:
 * clear M (bit 0, MMU enable), C (bit 2, D-cache enable) and EE
 * (bit 25, exception endianness), i.e. MMU off, D-cache off,
 * little-endian, via the 0xfdfffffa AND mask.
 *
 * Ends with a tail-call to __asm_invalidate_tlb_all, which returns
 * to this routine's caller through the unmodified lr.
 * Clobbers: x0, x1.
 */
13494f7ff36SSergey Temerkhanovreset_sctrl:
13594f7ff36SSergey Temerkhanov	switch_el x1, 3f, 2f, 1f
13694f7ff36SSergey Temerkhanov3:
13794f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el3
13894f7ff36SSergey Temerkhanov	b	0f
13994f7ff36SSergey Temerkhanov2:
14094f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el2
14194f7ff36SSergey Temerkhanov	b	0f
14294f7ff36SSergey Temerkhanov1:
14394f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el1
14494f7ff36SSergey Temerkhanov
14594f7ff36SSergey Temerkhanov0:
14694f7ff36SSergey Temerkhanov	ldr	x1, =0xfdfffffa		/* mask clears M, C and EE */
14794f7ff36SSergey Temerkhanov	and	x0, x0, x1
14894f7ff36SSergey Temerkhanov
14994f7ff36SSergey Temerkhanov	switch_el x1, 6f, 5f, 4f
15094f7ff36SSergey Temerkhanov6:
15194f7ff36SSergey Temerkhanov	msr	sctlr_el3, x0
15294f7ff36SSergey Temerkhanov	b	7f
15394f7ff36SSergey Temerkhanov5:
15494f7ff36SSergey Temerkhanov	msr	sctlr_el2, x0
15594f7ff36SSergey Temerkhanov	b	7f
15694f7ff36SSergey Temerkhanov4:
15794f7ff36SSergey Temerkhanov	msr	sctlr_el1, x0
15894f7ff36SSergey Temerkhanov
15994f7ff36SSergey Temerkhanov7:
16094f7ff36SSergey Temerkhanov	dsb	sy
16194f7ff36SSergey Temerkhanov	isb
	/*
	 * Tail call; returns to our caller via lr. The "ret" that used
	 * to follow this unconditional branch was unreachable dead code
	 * and has been removed.
	 */
16294f7ff36SSergey Temerkhanov	b	__asm_invalidate_tlb_all
16494f7ff36SSergey Temerkhanov#endif
16594f7ff36SSergey Temerkhanov
1660ae76531SDavid Feng/*-----------------------------------------------------------------------*/
1670ae76531SDavid Feng
16837118fb2SBhupesh SharmaWEAK(apply_core_errata)
	/*
	 * Apply ARM core specific errata workarounds (weak default).
	 * Saves lr in x29 across the detour; clobbers x0, x29, flags.
	 */
16937118fb2SBhupesh Sharma
17037118fb2SBhupesh Sharma	mov	x29, lr			/* Save LR */
17137118fb2SBhupesh Sharma	/* For now, we support Cortex-A57 specific errata only */
17237118fb2SBhupesh Sharma
17337118fb2SBhupesh Sharma	/* Check if we are running on a Cortex-A57 core */
17437118fb2SBhupesh Sharma	branch_if_a57_core x0, apply_a57_core_errata
17537118fb2SBhupesh Sharma0:
17637118fb2SBhupesh Sharma	mov	lr, x29			/* Restore LR */
17737118fb2SBhupesh Sharma	ret
17837118fb2SBhupesh Sharma
	/*
	 * Each workaround below read-modify-writes CPUACTLR_EL1, the
	 * implementation-defined auxiliary control register encoded as
	 * S3_1_c15_c2_0 on Cortex-A57.
	 */
17937118fb2SBhupesh Sharmaapply_a57_core_errata:
18037118fb2SBhupesh Sharma
18137118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_828024
18237118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
18337118fb2SBhupesh Sharma	/* Disable non-allocate hint of w-b-n-a memory type */
184f299b5b0SBhupesh Sharma	orr	x0, x0, #1 << 49
18537118fb2SBhupesh Sharma	/* Disable write streaming no L1-allocate threshold */
186f299b5b0SBhupesh Sharma	orr	x0, x0, #3 << 25
18737118fb2SBhupesh Sharma	/* Disable write streaming no-allocate threshold */
188f299b5b0SBhupesh Sharma	orr	x0, x0, #3 << 27
18937118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
19037118fb2SBhupesh Sharma#endif
19137118fb2SBhupesh Sharma
19237118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_826974
19337118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
19437118fb2SBhupesh Sharma	/* Disable speculative load execution ahead of a DMB */
195f299b5b0SBhupesh Sharma	orr	x0, x0, #1 << 59
19637118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
19737118fb2SBhupesh Sharma#endif
19837118fb2SBhupesh Sharma
1992ea3a448SAshish kumar#ifdef CONFIG_ARM_ERRATA_833471
2002ea3a448SAshish kumar	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
2012ea3a448SAshish kumar	/* FPSCR write flush.
2022ea3a448SAshish kumar	 * Note that in some cases where a flush is unnecessary this
2032ea3a448SAshish kumar	 * could impact performance. */
2042ea3a448SAshish kumar	orr	x0, x0, #1 << 38
2052ea3a448SAshish kumar	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
2062ea3a448SAshish kumar#endif
2072ea3a448SAshish kumar
2082ea3a448SAshish kumar#ifdef CONFIG_ARM_ERRATA_829520
2092ea3a448SAshish kumar	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
2102ea3a448SAshish kumar	/* Disable Indirect Predictor bit will prevent this erratum
2112ea3a448SAshish kumar	 * from occurring.
2122ea3a448SAshish kumar	 * Note that in some cases where a flush is unnecessary this
2132ea3a448SAshish kumar	 * could impact performance. */
2142ea3a448SAshish kumar	orr	x0, x0, #1 << 4
2152ea3a448SAshish kumar	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
2162ea3a448SAshish kumar#endif
2172ea3a448SAshish kumar
21837118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_833069
21937118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
22037118fb2SBhupesh Sharma	/* Disable Enable Invalidates of BTB bit */
	/*
	 * NOTE(review): this AND clears every CPUACTLR_EL1 bit outside
	 * [3:1] -- not just bit 0 ("Enable invalidates of BTB") -- and
	 * so wipes any workaround bits set by the errata blocks above
	 * when they are enabled together. If only bit 0 is meant to be
	 * cleared, "bic x0, x0, #1" looks intended; confirm against the
	 * Cortex-A57 erratum 833069 notice before changing.
	 */
22137118fb2SBhupesh Sharma	and	x0, x0, #0xE
22237118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
22337118fb2SBhupesh Sharma#endif
22437118fb2SBhupesh Sharma	b 0b
22537118fb2SBhupesh SharmaENDPROC(apply_core_errata)
22637118fb2SBhupesh Sharma
22737118fb2SBhupesh Sharma/*-----------------------------------------------------------------------*/
22837118fb2SBhupesh Sharma
2290ae76531SDavid FengWEAK(lowlevel_init)
	/*
	 * Weak default processor-specific init: set up the GIC when one
	 * is configured and, in multi-entry builds, park the slave CPUs
	 * in gic_wait_for_interrupt and then drop them to EL2 (and
	 * optionally EL1). Saves lr in x29 across the helper calls.
	 */
2300ae76531SDavid Feng	mov	x29, lr			/* Save LR */
2310ae76531SDavid Feng
232c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Distributor init is done once, by the master CPU only */
233c71645adSDavid Feng	branch_if_slave x0, 1f
234c71645adSDavid Feng	ldr	x0, =GICD_BASE
235c71645adSDavid Feng	bl	gic_init_secure
236c71645adSDavid Feng1:
237c71645adSDavid Feng#if defined(CONFIG_GICV3)
238c71645adSDavid Feng	ldr	x0, =GICR_BASE
239c71645adSDavid Feng	bl	gic_init_secure_percpu
240c71645adSDavid Feng#elif defined(CONFIG_GICV2)
241c71645adSDavid Feng	ldr	x0, =GICD_BASE
242c71645adSDavid Feng	ldr	x1, =GICC_BASE
243c71645adSDavid Feng	bl	gic_init_secure_percpu
244c71645adSDavid Feng#endif
24511661193SStephen Warren#endif
246c71645adSDavid Feng
247d38fca40SMasahiro Yamada#ifdef CONFIG_ARMV8_MULTIENTRY
248c71645adSDavid Feng	branch_if_master x0, x1, 2f
2490ae76531SDavid Feng
2500ae76531SDavid Feng	/*
2510ae76531SDavid Feng	 * Slave should wait for master clearing spin table.
2520ae76531SDavid Feng	 * This sync prevents slaves observing incorrect
2530ae76531SDavid Feng	 * value of spin table and jumping to wrong place.
2540ae76531SDavid Feng	 */
255c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
256c71645adSDavid Feng#ifdef CONFIG_GICV2
257c71645adSDavid Feng	ldr	x0, =GICC_BASE
258c71645adSDavid Feng#endif
259c71645adSDavid Feng	bl	gic_wait_for_interrupt
260c71645adSDavid Feng#endif
2610ae76531SDavid Feng
2620ae76531SDavid Feng	/*
263c71645adSDavid Feng	 * All slaves will enter EL2 and optionally EL1.
2640ae76531SDavid Feng	 */
265ec6617c3SAlison Wang	adr	x3, lowlevel_in_el2	/* x3 = resume address after switch */
266ec6617c3SAlison Wang	ldr	x4, =ES_TO_AARCH64	/* x4 = target execution state */
2670ae76531SDavid Feng	bl	armv8_switch_to_el2
268ec6617c3SAlison Wang
269ec6617c3SAlison Wanglowlevel_in_el2:
2700ae76531SDavid Feng#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
271ec6617c3SAlison Wang	adr	x3, lowlevel_in_el1
272ec6617c3SAlison Wang	ldr	x4, =ES_TO_AARCH64
2730ae76531SDavid Feng	bl	armv8_switch_to_el1
274ec6617c3SAlison Wang
275ec6617c3SAlison Wanglowlevel_in_el1:
2760ae76531SDavid Feng#endif
2770ae76531SDavid Feng
27823b5877cSLinus Walleij#endif /* CONFIG_ARMV8_MULTIENTRY */
27923b5877cSLinus Walleij
280c71645adSDavid Feng2:
2810ae76531SDavid Feng	mov	lr, x29			/* Restore LR */
2820ae76531SDavid Feng	ret
2830ae76531SDavid FengENDPROC(lowlevel_init)
2840ae76531SDavid Feng
285c71645adSDavid FengWEAK(smp_kick_all_cpus)
286c71645adSDavid Feng	/* Kick secondary cpus up by SGI 0 interrupt */
287c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
288c71645adSDavid Feng	ldr	x0, =GICD_BASE
	/* Tail call: gic_kick_secondary_cpus returns to our caller via lr */
289afedf548SMasahiro Yamada	b	gic_kick_secondary_cpus
290c71645adSDavid Feng#endif
	/* Reached only in builds without a GIC configured: nothing to do */
291c71645adSDavid Feng	ret
292c71645adSDavid FengENDPROC(smp_kick_all_cpus)
293c71645adSDavid Feng
2940ae76531SDavid Feng/*-----------------------------------------------------------------------*/
2950ae76531SDavid Feng
2960ae76531SDavid FengENTRY(c_runtime_cpu_setup)
	/*
	 * Re-point VBAR at this image's vector table at the current EL
	 * (adr yields the table's run-time address, so this picks up the
	 * relocated copy). Clobbers x0, x1.
	 */
2970ae76531SDavid Feng	/* Relocate vBAR */
2980ae76531SDavid Feng	adr	x0, vectors
2990ae76531SDavid Feng	switch_el x1, 3f, 2f, 1f
3000ae76531SDavid Feng3:	msr	vbar_el3, x0
3010ae76531SDavid Feng	b	0f
3020ae76531SDavid Feng2:	msr	vbar_el2, x0
3030ae76531SDavid Feng	b	0f
3040ae76531SDavid Feng1:	msr	vbar_el1, x0
3050ae76531SDavid Feng0:
3060ae76531SDavid Feng
3070ae76531SDavid Feng	ret
3080ae76531SDavid FengENDPROC(c_runtime_cpu_setup)
3090e2b5350SStephen Warren
3100e2b5350SStephen WarrenWEAK(save_boot_params)
	/*
	 * Weak no-op default; boards override it to stash registers left
	 * by the boot ROM / previous-stage loader. It is entered via a
	 * plain "b" from reset (no stack, lr not meaningful yet), so it
	 * must return by branching to save_boot_params_ret, not "ret".
	 */
3110e2b5350SStephen Warren	b	save_boot_params_ret	/* back to my caller */
3120e2b5350SStephen WarrenENDPROC(save_boot_params)
313