xref: /rk3399_rockchip-uboot/arch/arm/cpu/armv8/start.S (revision fa40f8a0c4e1a304a22cb297b4faf59a4627a774)
10ae76531SDavid Feng/*
20ae76531SDavid Feng * (C) Copyright 2013
30ae76531SDavid Feng * David Feng <fenghua@phytium.com.cn>
40ae76531SDavid Feng *
50ae76531SDavid Feng * SPDX-License-Identifier:	GPL-2.0+
60ae76531SDavid Feng */
70ae76531SDavid Feng
80ae76531SDavid Feng#include <asm-offsets.h>
90ae76531SDavid Feng#include <config.h>
100ae76531SDavid Feng#include <linux/linkage.h>
110ae76531SDavid Feng#include <asm/macro.h>
120ae76531SDavid Feng#include <asm/armv8/mmu.h>
130ae76531SDavid Feng
140ae76531SDavid Feng/*************************************************************************
150ae76531SDavid Feng *
160ae76531SDavid Feng * Startup Code (reset vector)
170ae76531SDavid Feng *
180ae76531SDavid Feng *************************************************************************/
190ae76531SDavid Feng
200ae76531SDavid Feng.globl	_start
	/* Very first instruction executed after reset, at the image base. */
210ae76531SDavid Feng_start:
22cdaa633fSAndre Przywara#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
23cdaa633fSAndre Przywara/*
24cdaa633fSAndre Przywara * Various SoCs need something special and SoC-specific up front in
25cdaa633fSAndre Przywara * order to boot, allow them to set that in their boot0.h file and then
26cdaa633fSAndre Przywara * use it here.
27cdaa633fSAndre Przywara */
28cdaa633fSAndre Przywara#include <asm/arch/boot0.h>
29a5168a59SAndre Przywara#else
	/* No SoC boot0 hook configured: fall straight through to reset */
30a5168a59SAndre Przywara	b	reset
31cdaa633fSAndre Przywara#endif
32cdaa633fSAndre Przywara
	/* 8-byte alignment for the 64-bit literals that follow */
330ae76531SDavid Feng	.align 3

	/* Link-time base address of the U-Boot image */
350ae76531SDavid Feng.globl	_TEXT_BASE
360ae76531SDavid Feng_TEXT_BASE:
370ae76531SDavid Feng	.quad	CONFIG_SYS_TEXT_BASE

390ae76531SDavid Feng/*
400ae76531SDavid Feng * These are defined in the linker script.
	 * The values stored are link-time offsets from _start, so they stay
	 * valid regardless of where the image is loaded or relocated.
410ae76531SDavid Feng */
	/* Total image size: offset of _end from _start */
420ae76531SDavid Feng.globl	_end_ofs
430ae76531SDavid Feng_end_ofs:
440ae76531SDavid Feng	.quad	_end - _start

	/* Offset of the start of the BSS section from _start */
460ae76531SDavid Feng.globl	_bss_start_ofs
470ae76531SDavid Feng_bss_start_ofs:
480ae76531SDavid Feng	.quad	__bss_start - _start

	/* Offset of the end of the BSS section from _start */
500ae76531SDavid Feng.globl	_bss_end_ofs
510ae76531SDavid Feng_bss_end_ofs:
520ae76531SDavid Feng	.quad	__bss_end - _start
530ae76531SDavid Feng
/*
 * reset - main C-runtime-free boot path for the primary (and, with
 * CONFIG_ARMV8_MULTIENTRY, secondary) CPUs.
 * Sets up vectors, FP/SIMD access and coherency for whichever EL we
 * were entered in, then runs errata/lowlevel init and calls _main.
 */
540ae76531SDavid Fengreset:
550e2b5350SStephen Warren	/* Allow the board to save important registers */
560e2b5350SStephen Warren	b	save_boot_params
570e2b5350SStephen Warren.globl	save_boot_params_ret
580e2b5350SStephen Warrensave_boot_params_ret:
590e2b5350SStephen Warren
6094f7ff36SSergey Temerkhanov#ifdef CONFIG_SYS_RESET_SCTRL
6194f7ff36SSergey Temerkhanov	bl reset_sctrl
6294f7ff36SSergey Temerkhanov#endif
630ae76531SDavid Feng	/*
640ae76531SDavid Feng	 * Could be EL3/EL2/EL1, Initial State:
650ae76531SDavid Feng	 * Little Endian, MMU Disabled, i/dCache Disabled
660ae76531SDavid Feng	 */
	/* Install the exception vector table for the current EL */
670ae76531SDavid Feng	adr	x0, vectors
680ae76531SDavid Feng	switch_el x1, 3f, 2f, 1f
691277bac0SDavid Feng3:	msr	vbar_el3, x0
701277bac0SDavid Feng	mrs	x0, scr_el3
71c71645adSDavid Feng	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
72c71645adSDavid Feng	msr	scr_el3, x0
730ae76531SDavid Feng	msr	cptr_el3, xzr			/* Enable FP/SIMD */
7470bcb43eSThierry Reding#ifdef COUNTER_FREQUENCY
750ae76531SDavid Feng	ldr	x0, =COUNTER_FREQUENCY
760ae76531SDavid Feng	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
7770bcb43eSThierry Reding#endif
780ae76531SDavid Feng	b	0f
	/* 0x33ff keeps CPTR_EL2 trap bits (e.g. TFP, bit 10) clear */
790ae76531SDavid Feng2:	msr	vbar_el2, x0
800ae76531SDavid Feng	mov	x0, #0x33ff
810ae76531SDavid Feng	msr	cptr_el2, x0			/* Enable FP/SIMD */
820ae76531SDavid Feng	b	0f
	/* CPACR_EL1.FPEN (bits [21:20]) = 0b11: no FP/SIMD trapping */
830ae76531SDavid Feng1:	msr	vbar_el1, x0
840ae76531SDavid Feng	mov	x0, #3 << 20
850ae76531SDavid Feng	msr	cpacr_el1, x0			/* Enable FP/SIMD */
860ae76531SDavid Feng0:
870ae76531SDavid Feng
883aec452eSMingkai Hu	/*
899ad7147bSDinh Nguyen	 * Enable SMPEN bit for coherency.
903aec452eSMingkai Hu	 * This register is not architectural but at the moment
913aec452eSMingkai Hu	 * this bit should be set for A53/A57/A72.
923aec452eSMingkai Hu	 */
933aec452eSMingkai Hu#ifdef CONFIG_ARMV8_SET_SMPEN
	/* Only written when entered in EL3; EL2/EL1 branch past the write */
94399e2bb6SYork Sun	switch_el x1, 3f, 1f, 1f
95399e2bb6SYork Sun3:
969ad7147bSDinh Nguyen	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
	/* 0x40 = SMPEN (bit 6) */
973aec452eSMingkai Hu	orr     x0, x0, #0x40
983aec452eSMingkai Hu	msr     S3_1_c15_c2_1, x0
99399e2bb6SYork Sun1:
1003aec452eSMingkai Hu#endif
1013aec452eSMingkai Hu
10237118fb2SBhupesh Sharma	/* Apply ARM core specific errata workarounds */
10337118fb2SBhupesh Sharma	bl	apply_core_errata
10437118fb2SBhupesh Sharma
1051e6ad55cSYork Sun	/*
1061e6ad55cSYork Sun	 * Cache/BPB/TLB Invalidate
1071e6ad55cSYork Sun	 * i-cache is invalidated before enabled in icache_enable()
1081e6ad55cSYork Sun	 * tlb is invalidated before mmu is enabled in dcache_enable()
1091e6ad55cSYork Sun	 * d-cache is invalidated before enabled in dcache_enable()
1101e6ad55cSYork Sun	 */
1110ae76531SDavid Feng
1120ae76531SDavid Feng	/* Processor specific initialization */
1130ae76531SDavid Feng	bl	lowlevel_init
1140ae76531SDavid Feng
1154b105f6cSOded Gabbay#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
1166b6024eaSMasahiro Yamada	branch_if_master x0, x1, master_cpu
1176b6024eaSMasahiro Yamada	b	spin_table_secondary_jump
1186b6024eaSMasahiro Yamada	/* never return */
1196b6024eaSMasahiro Yamada#elif defined(CONFIG_ARMV8_MULTIENTRY)
1200ae76531SDavid Feng	branch_if_master x0, x1, master_cpu
1210ae76531SDavid Feng
1220ae76531SDavid Feng	/*
1230ae76531SDavid Feng	 * Slave CPUs: spin (wfe) until the master publishes a non-zero
	 * entry address at CPU_RELEASE_ADDR, then jump to it.
1240ae76531SDavid Feng	 */
1250ae76531SDavid Fengslave_cpu:
1260ae76531SDavid Feng	wfe
1270ae76531SDavid Feng	ldr	x1, =CPU_RELEASE_ADDR
1280ae76531SDavid Feng	ldr	x0, [x1]
1290ae76531SDavid Feng	cbz	x0, slave_cpu
1300ae76531SDavid Feng	br	x0			/* branch to the given address */
13123b5877cSLinus Walleij#endif /* CONFIG_ARMV8_MULTIENTRY */
	/* Primary CPU continues into the C-capable entry point */
1326b6024eaSMasahiro Yamadamaster_cpu:
1330ae76531SDavid Feng	bl	_main
1340ae76531SDavid Feng
13594f7ff36SSergey Temerkhanov#ifdef CONFIG_SYS_RESET_SCTRL
/*
 * reset_sctrl - force SCTLR of the current EL into a known state:
 * clears M (bit 0, MMU), C (bit 2, data cache) and EE (bit 25,
 * exception endianness) via the 0xfdfffffa mask, then tail-branches
 * into __asm_invalidate_tlb_all, whose ret returns to our caller.
 * Clobbers x0, x1.
 */
13694f7ff36SSergey Temerkhanovreset_sctrl:
	/* Read SCTLR of whichever EL we are running at */
13794f7ff36SSergey Temerkhanov	switch_el x1, 3f, 2f, 1f
13894f7ff36SSergey Temerkhanov3:
13994f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el3
14094f7ff36SSergey Temerkhanov	b	0f
14194f7ff36SSergey Temerkhanov2:
14294f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el2
14394f7ff36SSergey Temerkhanov	b	0f
14494f7ff36SSergey Temerkhanov1:
14594f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el1
14694f7ff36SSergey Temerkhanov
14794f7ff36SSergey Temerkhanov0:
	/* ~0xfdfffffa = 0x02000005: bits M(0), C(2), EE(25) get cleared */
14894f7ff36SSergey Temerkhanov	ldr	x1, =0xfdfffffa
14994f7ff36SSergey Temerkhanov	and	x0, x0, x1

	/* Write the cleaned value back to the same EL's SCTLR */
15194f7ff36SSergey Temerkhanov	switch_el x1, 6f, 5f, 4f
15294f7ff36SSergey Temerkhanov6:
15394f7ff36SSergey Temerkhanov	msr	sctlr_el3, x0
15494f7ff36SSergey Temerkhanov	b	7f
15594f7ff36SSergey Temerkhanov5:
15694f7ff36SSergey Temerkhanov	msr	sctlr_el2, x0
15794f7ff36SSergey Temerkhanov	b	7f
15894f7ff36SSergey Temerkhanov4:
15994f7ff36SSergey Temerkhanov	msr	sctlr_el1, x0

16194f7ff36SSergey Temerkhanov7:
16294f7ff36SSergey Temerkhanov	dsb	sy
16394f7ff36SSergey Temerkhanov	isb
	/* Tail call: __asm_invalidate_tlb_all's ret goes to our caller */
16494f7ff36SSergey Temerkhanov	b	__asm_invalidate_tlb_all
	/* NOTE(review): unreachable — the unconditional branch above never
	 * falls through to this ret; kept byte-identical to upstream. */
16594f7ff36SSergey Temerkhanov	ret
16694f7ff36SSergey Temerkhanov#endif
16794f7ff36SSergey Temerkhanov
1680ae76531SDavid Feng/*-----------------------------------------------------------------------*/
1690ae76531SDavid Feng
/*
 * apply_core_errata - weak hook that applies CPUACTLR_EL1-based
 * workarounds for Cortex-A57 errata, gated by CONFIG_ARM_ERRATA_*.
 * Runs on every CPU before caches/MMU are enabled.
 * Uses x29 as scratch LR save (no stack is available yet); clobbers x0.
 */
17037118fb2SBhupesh SharmaWEAK(apply_core_errata)

17137118fb2SBhupesh Sharma	mov	x29, lr			/* Save LR */
17237118fb2SBhupesh Sharma	/* For now, we support Cortex-A57 specific errata only */

17437118fb2SBhupesh Sharma	/* Check if we are running on a Cortex-A57 core */
17537118fb2SBhupesh Sharma	branch_if_a57_core x0, apply_a57_core_errata
	/* Non-A57 cores (and the A57 path, via "b 0b" below) return here */
17637118fb2SBhupesh Sharma0:
17737118fb2SBhupesh Sharma	mov	lr, x29			/* Restore LR */
17837118fb2SBhupesh Sharma	ret

18037118fb2SBhupesh Sharmaapply_a57_core_errata:

18237118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_828024
18337118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
18437118fb2SBhupesh Sharma	/* Disable non-allocate hint of w-b-n-a memory type */
18537118fb2SBhupesh Sharma	orr	x0, x0, #1 << 49
18637118fb2SBhupesh Sharma	/* Disable write streaming no L1-allocate threshold */
18737118fb2SBhupesh Sharma	orr	x0, x0, #3 << 25
18837118fb2SBhupesh Sharma	/* Disable write streaming no-allocate threshold */
18937118fb2SBhupesh Sharma	orr	x0, x0, #3 << 27
19037118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
19137118fb2SBhupesh Sharma#endif

19337118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_826974
19437118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
19537118fb2SBhupesh Sharma	/* Disable speculative load execution ahead of a DMB */
19637118fb2SBhupesh Sharma	orr	x0, x0, #1 << 59
19737118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
19837118fb2SBhupesh Sharma#endif

2012ea3a448SAshish kumar#ifdef CONFIG_ARM_ERRATA_833471
2022ea3a448SAshish kumar	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
2032ea3a448SAshish kumar	/* FPSCR write flush.
2042ea3a448SAshish kumar	 * Note that in some cases where a flush is unnecessary this
2052ea3a448SAshish kumar	    could impact performance. */
2062ea3a448SAshish kumar	orr	x0, x0, #1 << 38
2072ea3a448SAshish kumar	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
2082ea3a448SAshish kumar#endif

2102ea3a448SAshish kumar#ifdef CONFIG_ARM_ERRATA_829520
2112ea3a448SAshish kumar	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
2122ea3a448SAshish kumar	/* Disable Indirect Predictor bit will prevent this erratum
2132ea3a448SAshish kumar	    from occurring
2142ea3a448SAshish kumar	 * Note that in some cases where a flush is unnecessary this
2152ea3a448SAshish kumar	    could impact performance. */
2162ea3a448SAshish kumar	orr	x0, x0, #1 << 4
2172ea3a448SAshish kumar	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
2182ea3a448SAshish kumar#endif

22037118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_833069
22137118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
22237118fb2SBhupesh Sharma	/* Disable Enable Invalidates of BTB bit */
	/* NOTE(review): AND with 0xE keeps only bits [3:1] and zeroes
	 * every other CPUACTLR_EL1 bit — much broader than clearing a
	 * single BTB bit, and it discards the workarounds applied above
	 * when this option is combined with them. Confirm against the
	 * Cortex-A57 erratum 833069 notice before relying on it. */
22337118fb2SBhupesh Sharma	and	x0, x0, #0xE
22437118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
22537118fb2SBhupesh Sharma#endif
	/* Jump back to the common restore-LR-and-return path */
22637118fb2SBhupesh Sharma	b 0b
22737118fb2SBhupesh SharmaENDPROC(apply_core_errata)
22837118fb2SBhupesh Sharma
22937118fb2SBhupesh Sharma/*-----------------------------------------------------------------------*/
23037118fb2SBhupesh Sharma
/*
 * lowlevel_init - weak per-board hook run from reset before _main.
 * Default implementation: initialize the GIC (master does the
 * distributor, every CPU its own per-cpu part), optionally route
 * exceptions to EL2 for CONFIG_IRQ, and in multi-entry builds park
 * the slaves and drop them to EL2 (and optionally EL1).
 * Uses x29 as scratch LR save (no stack yet).
 */
2310ae76531SDavid FengWEAK(lowlevel_init)
2320ae76531SDavid Feng	mov	x29, lr			/* Save LR */

234c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	/* Only the master initializes the (shared) GIC distributor */
235c71645adSDavid Feng	branch_if_slave x0, 1f
236c71645adSDavid Feng	ldr	x0, =GICD_BASE
237c71645adSDavid Feng	bl	gic_init_secure
238c71645adSDavid Feng1:
	/* Every CPU initializes its own per-cpu GIC interface */
239c71645adSDavid Feng#if defined(CONFIG_GICV3)
240c71645adSDavid Feng	ldr	x0, =GICR_BASE
241c71645adSDavid Feng	bl	gic_init_secure_percpu
242c71645adSDavid Feng#elif defined(CONFIG_GICV2)
243c71645adSDavid Feng	ldr	x0, =GICD_BASE
244c71645adSDavid Feng	ldr	x1, =GICC_BASE
245c71645adSDavid Feng	bl	gic_init_secure_percpu
246c71645adSDavid Feng#endif
24711661193SStephen Warren#endif

249*fa40f8a0SJoseph Chen#if defined(CONFIG_IRQ)
250*fa40f8a0SJoseph Chen	/*
251*fa40f8a0SJoseph Chen	 * Set HCR_EL2.TGE/AMO/IMO/FMO so exceptions are routed to EL2
252*fa40f8a0SJoseph Chen	 */
253*fa40f8a0SJoseph Chen	mrs	x0, CurrentEL		/* check currentEL */
254*fa40f8a0SJoseph Chen	cmp	x0, 0x8
255*fa40f8a0SJoseph Chen	b.ne	end			/* currentEL != EL2 */

257*fa40f8a0SJoseph Chen	mrs	x9, hcr_el2
258*fa40f8a0SJoseph Chen	orr	x9, x9, #(7 << 3)	/* HCR_EL2.AMO IMO FMO set */
259*fa40f8a0SJoseph Chen	orr	x9, x9, #(1 << 27)	/* HCR_EL2.TGE set */
260*fa40f8a0SJoseph Chen	msr	hcr_el2, x9

262*fa40f8a0SJoseph Chenend:
263*fa40f8a0SJoseph Chen	nop
264*fa40f8a0SJoseph Chen#endif /* CONFIG_IRQ */

266d38fca40SMasahiro Yamada#ifdef CONFIG_ARMV8_MULTIENTRY
267c71645adSDavid Feng	branch_if_master x0, x1, 2f

2690ae76531SDavid Feng	/*
2700ae76531SDavid Feng	 * Slave should wait for master clearing spin table.
2710ae76531SDavid Feng	 * This sync prevents slaves from observing an incorrect
2720ae76531SDavid Feng	 * value of spin table and jumping to wrong place.
2730ae76531SDavid Feng	 */
274c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
275c71645adSDavid Feng#ifdef CONFIG_GICV2
276c71645adSDavid Feng	ldr	x0, =GICC_BASE
277c71645adSDavid Feng#endif
278c71645adSDavid Feng	bl	gic_wait_for_interrupt
279c71645adSDavid Feng#endif

2810ae76531SDavid Feng	/*
282c71645adSDavid Feng	 * All slaves will enter EL2 and optionally EL1.
	 * x4 = return address after the switch, x5 = execution state.
2830ae76531SDavid Feng	 */
2847c5e1febSAlison Wang	adr	x4, lowlevel_in_el2
2857c5e1febSAlison Wang	ldr	x5, =ES_TO_AARCH64
2860ae76531SDavid Feng	bl	armv8_switch_to_el2

288ec6617c3SAlison Wanglowlevel_in_el2:
2890ae76531SDavid Feng#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
2907c5e1febSAlison Wang	adr	x4, lowlevel_in_el1
2917c5e1febSAlison Wang	ldr	x5, =ES_TO_AARCH64
2920ae76531SDavid Feng	bl	armv8_switch_to_el1

294ec6617c3SAlison Wanglowlevel_in_el1:
2950ae76531SDavid Feng#endif

29723b5877cSLinus Walleij#endif /* CONFIG_ARMV8_MULTIENTRY */

299c71645adSDavid Feng2:
3000ae76531SDavid Feng	mov	lr, x29			/* Restore LR */
3010ae76531SDavid Feng	ret
3020ae76531SDavid FengENDPROC(lowlevel_init)
3030ae76531SDavid Feng
/*
 * smp_kick_all_cpus - weak hook to wake the secondary CPUs.
 * Default: tail-branch into gic_kick_secondary_cpus (its ret returns
 * to our caller); without a GIC configured this is just a ret.
 */
304c71645adSDavid FengWEAK(smp_kick_all_cpus)
305c71645adSDavid Feng	/* Kick secondary cpus up by SGI 0 interrupt */
306c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
307c71645adSDavid Feng	ldr	x0, =GICD_BASE
	/* Tail call: not reached past here when a GIC is configured */
308afedf548SMasahiro Yamada	b	gic_kick_secondary_cpus
309c71645adSDavid Feng#endif
310c71645adSDavid Feng	ret
311c71645adSDavid FengENDPROC(smp_kick_all_cpus)
312c71645adSDavid Feng
3130ae76531SDavid Feng/*-----------------------------------------------------------------------*/
3140ae76531SDavid Feng
/*
 * c_runtime_cpu_setup - called after relocation; re-points VBAR of the
 * current EL at the (now relocated) exception vector table.
 * Clobbers x0, x1.
 */
3150ae76531SDavid FengENTRY(c_runtime_cpu_setup)
3160ae76531SDavid Feng	/* Relocate vBAR */
3170ae76531SDavid Feng	adr	x0, vectors
3180ae76531SDavid Feng	switch_el x1, 3f, 2f, 1f
3190ae76531SDavid Feng3:	msr	vbar_el3, x0
3200ae76531SDavid Feng	b	0f
3210ae76531SDavid Feng2:	msr	vbar_el2, x0
3220ae76531SDavid Feng	b	0f
3230ae76531SDavid Feng1:	msr	vbar_el1, x0
3240ae76531SDavid Feng0:

3260ae76531SDavid Feng	ret
3270ae76531SDavid FengENDPROC(c_runtime_cpu_setup)
3280e2b5350SStephen Warren
/*
 * save_boot_params - weak default for the hook branched to first thing
 * in reset. Boards that need to preserve registers handed over by the
 * previous boot stage override this; the default saves nothing and
 * branches straight back. NOTE: entered via "b", not "bl", so an
 * override must also end with "b save_boot_params_ret", not "ret".
 */
3290e2b5350SStephen WarrenWEAK(save_boot_params)
3300e2b5350SStephen Warren	b	save_boot_params_ret	/* back to my caller */
3310e2b5350SStephen WarrenENDPROC(save_boot_params)
332