xref: /rk3399_rockchip-uboot/arch/arm/cpu/armv8/start.S (revision 37e5dcc8baea3dc897741e9be973d7d226f5edb4)
10ae76531SDavid Feng/*
20ae76531SDavid Feng * (C) Copyright 2013
30ae76531SDavid Feng * David Feng <fenghua@phytium.com.cn>
40ae76531SDavid Feng *
50ae76531SDavid Feng * SPDX-License-Identifier:	GPL-2.0+
60ae76531SDavid Feng */
70ae76531SDavid Feng
80ae76531SDavid Feng#include <asm-offsets.h>
90ae76531SDavid Feng#include <config.h>
100ae76531SDavid Feng#include <linux/linkage.h>
110ae76531SDavid Feng#include <asm/macro.h>
120ae76531SDavid Feng#include <asm/armv8/mmu.h>
130ae76531SDavid Feng
140ae76531SDavid Feng/*************************************************************************
150ae76531SDavid Feng *
160ae76531SDavid Feng * Startup Code (reset vector)
170ae76531SDavid Feng *
180ae76531SDavid Feng *************************************************************************/
190ae76531SDavid Feng
/*
 * _start: very first instruction executed after release from reset.
 * Either the SoC's boot0 hook provides the entry sequence, or we simply
 * branch to the common reset path below.
 */
200ae76531SDavid Feng.globl	_start
210ae76531SDavid Feng_start:
22cdaa633fSAndre Przywara#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
23cdaa633fSAndre Przywara/*
24cdaa633fSAndre Przywara * Various SoCs need something special and SoC-specific up front in
25cdaa633fSAndre Przywara * order to boot, allow them to set that in their boot0.h file and then
26cdaa633fSAndre Przywara * use it here.
27cdaa633fSAndre Przywara */
28cdaa633fSAndre Przywara#include <asm/arch/boot0.h>
29a5168a59SAndre Przywara#else
30a5168a59SAndre Przywara	b	reset
31cdaa633fSAndre Przywara#endif
32cdaa633fSAndre Przywara
33*37e5dcc8SYouMin Chen#if !CONFIG_IS_ENABLED(TINY_FRAMEWORK)
340ae76531SDavid Feng	.align 3
350ae76531SDavid Feng
/*
 * Link-time base address of the image. Compared against the runtime
 * address in pie_fixup below to compute the run-vs-link offset.
 */
360ae76531SDavid Feng.globl	_TEXT_BASE
370ae76531SDavid Feng_TEXT_BASE:
380ae76531SDavid Feng	.quad	CONFIG_SYS_TEXT_BASE
390ae76531SDavid Feng
400ae76531SDavid Feng/*
410ae76531SDavid Feng * These are defined in the linker script.
420ae76531SDavid Feng */
/*
 * The values below are stored as offsets from _start (not absolute
 * addresses) so they remain valid regardless of where the image is
 * actually loaded, i.e. before relocation.
 */
430ae76531SDavid Feng.globl	_end_ofs
440ae76531SDavid Feng_end_ofs:
450ae76531SDavid Feng	.quad	_end - _start
460ae76531SDavid Feng
470ae76531SDavid Feng.globl	_bss_start_ofs
480ae76531SDavid Feng_bss_start_ofs:
490ae76531SDavid Feng	.quad	__bss_start - _start
500ae76531SDavid Feng
510ae76531SDavid Feng.globl	_bss_end_ofs
520ae76531SDavid Feng_bss_end_ofs:
530ae76531SDavid Feng	.quad	__bss_end - _start
540ae76531SDavid Feng
/*
 * reset: common entry point. First give the board code a chance to
 * stash any registers the boot ROM handed us (weak save_boot_params
 * below just branches straight back here).
 */
550ae76531SDavid Fengreset:
560e2b5350SStephen Warren	/* Allow the board to save important registers */
570e2b5350SStephen Warren	b	save_boot_params
580e2b5350SStephen Warren.globl	save_boot_params_ret
590e2b5350SStephen Warrensave_boot_params_ret:
600e2b5350SStephen Warren
61f00ac1e5SStephen Warren#if CONFIG_POSITION_INDEPENDENT
62f00ac1e5SStephen Warren	/*
63f00ac1e5SStephen Warren	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
64f00ac1e5SStephen Warren	 * executed at a different address than it was linked at.
65f00ac1e5SStephen Warren	 */
66f00ac1e5SStephen Warrenpie_fixup:
67f00ac1e5SStephen Warren	adr	x0, _start		/* x0 <- Runtime value of _start */
68f00ac1e5SStephen Warren	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
69f00ac1e5SStephen Warren	sub	x9, x0, x1		/* x9 <- Run-vs-link offset */
70f00ac1e5SStephen Warren	adr	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
71f00ac1e5SStephen Warren	adr	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
	/*
	 * Walk the 24-byte Elf64_Rela entries: r_offset, r_info, r_addend.
	 * NOTE(review): the loop body runs before the x2 < x3 check, so it
	 * assumes .rela.dyn contains at least one entry — presumably the
	 * linker script guarantees this; confirm.
	 */
72f00ac1e5SStephen Warrenpie_fix_loop:
73f00ac1e5SStephen Warren	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
74f00ac1e5SStephen Warren	ldr	x4, [x2], #8		/* x4 <- addend */
	/* 1027 == R_AARCH64_RELATIVE; every other reloc type is skipped. */
75f00ac1e5SStephen Warren	cmp	w1, #1027		/* relative fixup? */
76f00ac1e5SStephen Warren	bne	pie_skip_reloc
77f00ac1e5SStephen Warren	/* relative fix: store addend plus offset at dest location */
78f00ac1e5SStephen Warren	add	x0, x0, x9
79f00ac1e5SStephen Warren	add	x4, x4, x9
80f00ac1e5SStephen Warren	str	x4, [x0]
81f00ac1e5SStephen Warrenpie_skip_reloc:
82f00ac1e5SStephen Warren	cmp	x2, x3
83f00ac1e5SStephen Warren	b.lo	pie_fix_loop
84f00ac1e5SStephen Warrenpie_fixup_done:
85f00ac1e5SStephen Warren#endif
86f00ac1e5SStephen Warren
8794f7ff36SSergey Temerkhanov#ifdef CONFIG_SYS_RESET_SCTRL
8894f7ff36SSergey Temerkhanov	bl reset_sctrl
8994f7ff36SSergey Temerkhanov#endif
900ae76531SDavid Feng	/*
910ae76531SDavid Feng	 * Could be EL3/EL2/EL1, Initial State:
920ae76531SDavid Feng	 * Little Endian, MMU Disabled, i/dCache Disabled
930ae76531SDavid Feng	 */
	/*
	 * Install the exception vector table and enable FP/SIMD access,
	 * with one branch target per possible exception level.
	 */
940ae76531SDavid Feng	adr	x0, vectors
950ae76531SDavid Feng	switch_el x1, 3f, 2f, 1f
961277bac0SDavid Feng3:	msr	vbar_el3, x0
971277bac0SDavid Feng	mrs	x0, scr_el3
	/* Route external aborts/IRQ/FIQ to EL3 and mark lower ELs non-secure */
98c71645adSDavid Feng	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
99c71645adSDavid Feng	msr	scr_el3, x0
1000ae76531SDavid Feng	msr	cptr_el3, xzr			/* Enable FP/SIMD */
	/* CNTFRQ is only programmed on the EL3 path (secure firmware duty) */
10170bcb43eSThierry Reding#ifdef COUNTER_FREQUENCY
1020ae76531SDavid Feng	ldr	x0, =COUNTER_FREQUENCY
1030ae76531SDavid Feng	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
10470bcb43eSThierry Reding#endif
1050ae76531SDavid Feng	b	0f
1060ae76531SDavid Feng2:	msr	vbar_el2, x0
1070ae76531SDavid Feng	mov	x0, #0x33ff
1080ae76531SDavid Feng	msr	cptr_el2, x0			/* Enable FP/SIMD */
1090ae76531SDavid Feng	b	0f
1100ae76531SDavid Feng1:	msr	vbar_el1, x0
	/* CPACR_EL1.FPEN = 0b11: no trapping of FP/SIMD at EL0/EL1 */
1110ae76531SDavid Feng	mov	x0, #3 << 20
1120ae76531SDavid Feng	msr	cpacr_el1, x0			/* Enable FP/SIMD */
1130ae76531SDavid Feng0:
1140ae76531SDavid Feng
1153aec452eSMingkai Hu	/*
1169ad7147bSDinh Nguyen	 * Enable SMPEN bit for coherency.
1173aec452eSMingkai Hu	 * This register is not architectural but at the moment
1183aec452eSMingkai Hu	 * this bit should be set for A53/A57/A72.
1193aec452eSMingkai Hu	 */
1203aec452eSMingkai Hu#ifdef CONFIG_ARMV8_SET_SMPEN
	/* Only done when entered in EL3; EL2/EL1 fall through untouched */
121399e2bb6SYork Sun	switch_el x1, 3f, 1f, 1f
122399e2bb6SYork Sun3:
	/* CPUECTLR_EL1.SMPEN (bit 6): join the coherency domain */
1239ad7147bSDinh Nguyen	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
1243aec452eSMingkai Hu	orr     x0, x0, #0x40
1253aec452eSMingkai Hu	msr     S3_1_c15_c2_1, x0
126399e2bb6SYork Sun1:
1273aec452eSMingkai Hu#endif
1283aec452eSMingkai Hu
12937118fb2SBhupesh Sharma	/* Apply ARM core specific erratas */
13037118fb2SBhupesh Sharma	bl	apply_core_errata
13137118fb2SBhupesh Sharma
1321e6ad55cSYork Sun	/*
1331e6ad55cSYork Sun	 * Cache/BPB/TLB Invalidate
1341e6ad55cSYork Sun	 * i-cache is invalidated before enabled in icache_enable()
1351e6ad55cSYork Sun	 * tlb is invalidated before mmu is enabled in dcache_enable()
1361e6ad55cSYork Sun	 * d-cache is invalidated before enabled in dcache_enable()
1371e6ad55cSYork Sun	 */
1380ae76531SDavid Feng
1390ae76531SDavid Feng	/* Processor specific initialization */
1400ae76531SDavid Feng	bl	lowlevel_init
1410ae76531SDavid Feng
/*
 * CPU dispatch: on SMP configurations only the master continues into
 * _main; secondaries either enter the spin-table protocol or park in
 * the wfe loop below until CPU_RELEASE_ADDR is written non-zero.
 */
1424b105f6cSOded Gabbay#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
1436b6024eaSMasahiro Yamada	branch_if_master x0, x1, master_cpu
1446b6024eaSMasahiro Yamada	b	spin_table_secondary_jump
1456b6024eaSMasahiro Yamada	/* never return */
1466b6024eaSMasahiro Yamada#elif defined(CONFIG_ARMV8_MULTIENTRY)
1470ae76531SDavid Feng	branch_if_master x0, x1, master_cpu
1480ae76531SDavid Feng
1490ae76531SDavid Feng	/*
1500ae76531SDavid Feng	 * Slave CPUs
1510ae76531SDavid Feng	 */
1520ae76531SDavid Fengslave_cpu:
	/* Sleep until the master's sev/interrupt, then re-check the mailbox */
1530ae76531SDavid Feng	wfe
1540ae76531SDavid Feng	ldr	x1, =CPU_RELEASE_ADDR
1550ae76531SDavid Feng	ldr	x0, [x1]
1560ae76531SDavid Feng	cbz	x0, slave_cpu
1570ae76531SDavid Feng	br	x0			/* branch to the given address */
15823b5877cSLinus Walleij#endif /* CONFIG_ARMV8_MULTIENTRY */
1596b6024eaSMasahiro Yamadamaster_cpu:
1600ae76531SDavid Feng	bl	_main
1610ae76531SDavid Feng
/*
 * reset_sctrl: force SCTLR_ELx of the current EL into a known state.
 * The 0xfdfffffa mask clears M (bit 0, MMU), C (bit 2, d-cache) and
 * EE (bit 25, exception endianness), then TLBs are invalidated.
 * Tail-branches into __asm_invalidate_tlb_all, which returns to the
 * original caller via the unmodified lr.
 */
16294f7ff36SSergey Temerkhanov#ifdef CONFIG_SYS_RESET_SCTRL
16394f7ff36SSergey Temerkhanovreset_sctrl:
16494f7ff36SSergey Temerkhanov	switch_el x1, 3f, 2f, 1f
16594f7ff36SSergey Temerkhanov3:
16694f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el3
16794f7ff36SSergey Temerkhanov	b	0f
16894f7ff36SSergey Temerkhanov2:
16994f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el2
17094f7ff36SSergey Temerkhanov	b	0f
17194f7ff36SSergey Temerkhanov1:
17294f7ff36SSergey Temerkhanov	mrs	x0, sctlr_el1
17394f7ff36SSergey Temerkhanov
17494f7ff36SSergey Temerkhanov0:
17594f7ff36SSergey Temerkhanov	ldr	x1, =0xfdfffffa
17694f7ff36SSergey Temerkhanov	and	x0, x0, x1
17794f7ff36SSergey Temerkhanov
17894f7ff36SSergey Temerkhanov	switch_el x1, 6f, 5f, 4f
17994f7ff36SSergey Temerkhanov6:
18094f7ff36SSergey Temerkhanov	msr	sctlr_el3, x0
18194f7ff36SSergey Temerkhanov	b	7f
18294f7ff36SSergey Temerkhanov5:
18394f7ff36SSergey Temerkhanov	msr	sctlr_el2, x0
18494f7ff36SSergey Temerkhanov	b	7f
18594f7ff36SSergey Temerkhanov4:
18694f7ff36SSergey Temerkhanov	msr	sctlr_el1, x0
18794f7ff36SSergey Temerkhanov
18894f7ff36SSergey Temerkhanov7:
18994f7ff36SSergey Temerkhanov	dsb	sy
19094f7ff36SSergey Temerkhanov	isb
19194f7ff36SSergey Temerkhanov	b	__asm_invalidate_tlb_all
	/* NOTE(review): this ret is unreachable — the unconditional b above
	   never falls through; it could be dropped. */
19294f7ff36SSergey Temerkhanov	ret
19394f7ff36SSergey Temerkhanov#endif
19494f7ff36SSergey Temerkhanov
1950ae76531SDavid Feng/*-----------------------------------------------------------------------*/
1960ae76531SDavid Feng
/*
 * apply_core_errata: weak hook applying Cortex-A57 software workarounds,
 * each gated by its CONFIG_ARM_ERRATA_* option. All workarounds poke
 * CPUACTLR_EL1 (S3_1_c15_c2_0), an IMPLEMENTATION DEFINED register.
 * Non-A57 cores return immediately via the 0: path.
 * Clobbers: x0 (and x29, used to preserve lr across the helpers).
 */
19737118fb2SBhupesh SharmaWEAK(apply_core_errata)
19837118fb2SBhupesh Sharma
19937118fb2SBhupesh Sharma	mov	x29, lr			/* Save LR */
20037118fb2SBhupesh Sharma	/* For now, we support Cortex-A57 specific errata only */
20137118fb2SBhupesh Sharma
20237118fb2SBhupesh Sharma	/* Check if we are running on a Cortex-A57 core */
20337118fb2SBhupesh Sharma	branch_if_a57_core x0, apply_a57_core_errata
20437118fb2SBhupesh Sharma0:
20537118fb2SBhupesh Sharma	mov	lr, x29			/* Restore LR */
20637118fb2SBhupesh Sharma	ret
20737118fb2SBhupesh Sharma
/* A57-only tail; falls back to 0: above (b 0b) when done. */
20837118fb2SBhupesh Sharmaapply_a57_core_errata:
20937118fb2SBhupesh Sharma
21037118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_828024
21137118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
21237118fb2SBhupesh Sharma	/* Disable non-allocate hint of w-b-n-a memory type */
213f299b5b0SBhupesh Sharma	orr	x0, x0, #1 << 49
21437118fb2SBhupesh Sharma	/* Disable write streaming no L1-allocate threshold */
215f299b5b0SBhupesh Sharma	orr	x0, x0, #3 << 25
21637118fb2SBhupesh Sharma	/* Disable write streaming no-allocate threshold */
217f299b5b0SBhupesh Sharma	orr	x0, x0, #3 << 27
21837118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
21937118fb2SBhupesh Sharma#endif
22037118fb2SBhupesh Sharma
22137118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_826974
22237118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
22337118fb2SBhupesh Sharma	/* Disable speculative load execution ahead of a DMB */
224f299b5b0SBhupesh Sharma	orr	x0, x0, #1 << 59
22537118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
22637118fb2SBhupesh Sharma#endif
22737118fb2SBhupesh Sharma
2282ea3a448SAshish kumar#ifdef CONFIG_ARM_ERRATA_833471
2292ea3a448SAshish kumar	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
2302ea3a448SAshish kumar	/* FPSCR write flush.
2312ea3a448SAshish kumar	 * Note that in some cases where a flush is unnecessary this
2322ea3a448SAshish kumar	    could impact performance. */
2332ea3a448SAshish kumar	orr	x0, x0, #1 << 38
2342ea3a448SAshish kumar	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
2352ea3a448SAshish kumar#endif
2362ea3a448SAshish kumar
2372ea3a448SAshish kumar#ifdef CONFIG_ARM_ERRATA_829520
2382ea3a448SAshish kumar	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
2392ea3a448SAshish kumar	/* Disable Indirect Predictor bit will prevent this erratum
2402ea3a448SAshish kumar	    from occurring
2412ea3a448SAshish kumar	 * Note that in some cases where a flush is unnecessary this
2422ea3a448SAshish kumar	    could impact performance. */
2432ea3a448SAshish kumar	orr	x0, x0, #1 << 4
2442ea3a448SAshish kumar	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
2452ea3a448SAshish kumar#endif
2462ea3a448SAshish kumar
24737118fb2SBhupesh Sharma#ifdef CONFIG_ARM_ERRATA_833069
24837118fb2SBhupesh Sharma	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
24937118fb2SBhupesh Sharma	/* Disable Enable Invalidates of BTB bit */
	/* NOTE(review): this AND with #0xE zeroes every CPUACTLR bit except
	   [3:1], unlike the sibling workarounds which only set single bits —
	   looks suspect; confirm against the A57 erratum notice before
	   relying on this config option. */
25037118fb2SBhupesh Sharma	and	x0, x0, #0xE
25137118fb2SBhupesh Sharma	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
25237118fb2SBhupesh Sharma#endif
25337118fb2SBhupesh Sharma	b 0b
25437118fb2SBhupesh SharmaENDPROC(apply_core_errata)
25537118fb2SBhupesh Sharma
25637118fb2SBhupesh Sharma/*-----------------------------------------------------------------------*/
25737118fb2SBhupesh Sharma
/*
 * lowlevel_init: weak default processor init. Sets up the GIC (when
 * IRQ support is enabled in a non-SPL build), optionally routes
 * exceptions to EL2, and handles slave-CPU parking/EL switching for
 * multi-entry configurations. Clobbers x0/x1/x9 (x29 preserves lr).
 */
2580ae76531SDavid FengWEAK(lowlevel_init)
2590ae76531SDavid Feng	mov	x29, lr			/* Save LR */
2600ae76531SDavid Feng
261f4fc5f8dSKever Yang#if !defined(CONFIG_SPL_BUILD) && defined(CONFIG_IRQ)
	/* Distributor init is done once, by the master only */
262c71645adSDavid Feng	branch_if_slave x0, 1f
263c71645adSDavid Feng	ldr	x0, =GICD_BASE
264c71645adSDavid Feng	bl	gic_init_secure
265c71645adSDavid Feng1:
266c71645adSDavid Feng#if defined(CONFIG_GICV3)
267c71645adSDavid Feng	ldr	x0, =GICR_BASE
268c71645adSDavid Feng	bl	gic_init_secure_percpu
269c71645adSDavid Feng#elif defined(CONFIG_GICV2)
270c71645adSDavid Feng	ldr	x0, =GICD_BASE
271c71645adSDavid Feng	ldr	x1, =GICC_BASE
272c71645adSDavid Feng	bl	gic_init_secure_percpu
273c71645adSDavid Feng#endif
27411661193SStephen Warren#endif
275c71645adSDavid Feng
276f4fc5f8dSKever Yang#if !defined(CONFIG_SPL_BUILD) && defined(CONFIG_IRQ)
277fa40f8a0SJoseph Chen	/*
278fa40f8a0SJoseph Chen	 * Setting HCR_EL2.TGE AMO IMO FMO for exception routing to EL2
279fa40f8a0SJoseph Chen	 */
	/* CurrentEL[3:2] == 0b10 (0x8) means we are running at EL2 */
280fa40f8a0SJoseph Chen	mrs	x0, CurrentEL		/* check currentEL */
281fa40f8a0SJoseph Chen	cmp	x0, 0x8
282fa40f8a0SJoseph Chen	b.ne	end			/* currentEL != EL2 */
283fa40f8a0SJoseph Chen
284fa40f8a0SJoseph Chen	mrs	x9, hcr_el2
285fa40f8a0SJoseph Chen	orr	x9, x9, #(7 << 3)	/* HCR_EL2.AMO IMO FMO set */
286fa40f8a0SJoseph Chen	orr	x9, x9, #(1 << 27)	/* HCR_EL2.TGE set */
287fa40f8a0SJoseph Chen	msr	hcr_el2, x9
288fa40f8a0SJoseph Chen
289fa40f8a0SJoseph Chenend:
290fa40f8a0SJoseph Chen	nop
291fa40f8a0SJoseph Chen#endif /* CONFIG_IRQ */
292fa40f8a0SJoseph Chen
293d38fca40SMasahiro Yamada#ifdef CONFIG_ARMV8_MULTIENTRY
	/* Master skips straight to the common return path at 2: */
294c71645adSDavid Feng	branch_if_master x0, x1, 2f
2950ae76531SDavid Feng
2960ae76531SDavid Feng	/*
2970ae76531SDavid Feng	 * Slave should wait for master clearing spin table.
2980ae76531SDavid Feng	 * This sync prevents slaves observing incorrect
2990ae76531SDavid Feng	 * value of spin table and jumping to wrong place.
3000ae76531SDavid Feng	 */
301c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
302c71645adSDavid Feng#ifdef CONFIG_GICV2
303c71645adSDavid Feng	ldr	x0, =GICC_BASE
304c71645adSDavid Feng#endif
305c71645adSDavid Feng	bl	gic_wait_for_interrupt
306c71645adSDavid Feng#endif
3070ae76531SDavid Feng
3080ae76531SDavid Feng	/*
309c71645adSDavid Feng	 * All slaves will enter EL2 and optionally EL1.
3100ae76531SDavid Feng	 */
	/* x4 = resume address, x5 = target execution state for the switch */
3117c5e1febSAlison Wang	adr	x4, lowlevel_in_el2
3127c5e1febSAlison Wang	ldr	x5, =ES_TO_AARCH64
3130ae76531SDavid Feng	bl	armv8_switch_to_el2
314ec6617c3SAlison Wang
315ec6617c3SAlison Wanglowlevel_in_el2:
3160ae76531SDavid Feng#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
3177c5e1febSAlison Wang	adr	x4, lowlevel_in_el1
3187c5e1febSAlison Wang	ldr	x5, =ES_TO_AARCH64
3190ae76531SDavid Feng	bl	armv8_switch_to_el1
320ec6617c3SAlison Wang
321ec6617c3SAlison Wanglowlevel_in_el1:
3220ae76531SDavid Feng#endif
3230ae76531SDavid Feng
32423b5877cSLinus Walleij#endif /* CONFIG_ARMV8_MULTIENTRY */
32523b5877cSLinus Walleij
326c71645adSDavid Feng2:
3270ae76531SDavid Feng	mov	lr, x29			/* Restore LR */
3280ae76531SDavid Feng	ret
3290ae76531SDavid FengENDPROC(lowlevel_init)
3300ae76531SDavid Feng
/*
 * smp_kick_all_cpus: weak default; wake secondaries with SGI 0.
 * With a GIC configured this tail-branches (b, not bl) into
 * gic_kick_secondary_cpus, which returns to our caller directly;
 * the trailing ret only executes when no GIC is configured.
 */
331c71645adSDavid FengWEAK(smp_kick_all_cpus)
332c71645adSDavid Feng	/* Kick secondary cpus up by SGI 0 interrupt */
333c71645adSDavid Feng#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
334c71645adSDavid Feng	ldr	x0, =GICD_BASE
335afedf548SMasahiro Yamada	b	gic_kick_secondary_cpus
336c71645adSDavid Feng#endif
337c71645adSDavid Feng	ret
338c71645adSDavid FengENDPROC(smp_kick_all_cpus)
339c71645adSDavid Feng
3400ae76531SDavid Feng/*-----------------------------------------------------------------------*/
3410ae76531SDavid Feng
/*
 * c_runtime_cpu_setup: called after relocation; re-point VBAR at the
 * relocated copy of the vector table for the current exception level.
 * Clobbers: x0, x1.
 */
3420ae76531SDavid FengENTRY(c_runtime_cpu_setup)
3430ae76531SDavid Feng	/* Relocate vBAR */
3440ae76531SDavid Feng	adr	x0, vectors
3450ae76531SDavid Feng	switch_el x1, 3f, 2f, 1f
3460ae76531SDavid Feng3:	msr	vbar_el3, x0
3470ae76531SDavid Feng	b	0f
3480ae76531SDavid Feng2:	msr	vbar_el2, x0
3490ae76531SDavid Feng	b	0f
3500ae76531SDavid Feng1:	msr	vbar_el1, x0
3510ae76531SDavid Feng0:
3520ae76531SDavid Feng
3530ae76531SDavid Feng	ret
3540ae76531SDavid FengENDPROC(c_runtime_cpu_setup)
3550e2b5350SStephen Warren
/*
 * save_boot_params: weak no-op hook. Boards override this to preserve
 * registers passed in by the boot ROM; the default just resumes the
 * reset path. Must not assume a valid stack — entered via b, not bl.
 */
3560e2b5350SStephen WarrenWEAK(save_boot_params)
3570e2b5350SStephen Warren	b	save_boot_params_ret	/* back to my caller */
3580e2b5350SStephen WarrenENDPROC(save_boot_params)
359*37e5dcc8SYouMin Chen#endif
360