/* arch/arm/cpu/armv8/start.S (rk3399_rockchip-uboot, revision 5ec685037a799ecdc53ecb1a12a9ed5a9cecb4f4) */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; allow them to set that up in their boot0.h file and
 * include it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start
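/*
 * Note: storing these as offsets from _start keeps them valid before
 * relocation, when the image may be running from an address other
 * than CONFIG_SYS_TEXT_BASE.
 */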

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#ifdef CONFIG_SYS_RESET_SCTRL
	bl reset_sctrl
#endif
	/*
	 * We could be running at EL3, EL2 or EL1. Initial state:
	 * little-endian, MMU disabled, I-cache/D-cache disabled.
	 */
	adr	x0, vectors
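	/*
	 * switch_el reads CurrentEL and branches to the 3f/2f/1f label
	 * matching the exception level we were entered at.
	 */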
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
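	/*
	 * CNTFRQ_EL0 is writable only from the highest implemented
	 * exception level, hence it is programmed here in the EL3 path.
	 */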
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
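	/*
	 * 0x33ff sets the CPTR_EL2 RES1 bits and leaves TFP (bit 10)
	 * clear, so FP/SIMD accesses are not trapped to EL2.
	 */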
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
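	/* CPACR_EL1.FPEN (bits 21:20) = 0b11: no FP/SIMD trapping */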
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD */
0:

	/*
	 * Enable the SMPEN bit for coherency.
	 * This register is IMPLEMENTATION DEFINED rather than
	 * architectural, but at the moment this bit should be set for
	 * A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
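	/* SMPEN is bit 6 (0x40) of CPUECTLR_EL1 */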
	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
	orr     x0, x0, #0x40
	msr     S3_1_c15_c2_1, x0
1:
#endif

	/* Apply ARM core-specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * The I-cache is invalidated before being enabled in icache_enable(),
	 * the TLB is invalidated before the MMU is enabled in dcache_enable(),
	 * and the D-cache is invalidated before being enabled in dcache_enable().
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never returns */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
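	/*
	 * Sleep in WFE; on each wakeup, re-read CPU_RELEASE_ADDR and
	 * leave the loop only once the master has published a non-zero
	 * entry address there.
	 */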
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
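	/*
	 * The mask 0xfdfffffa clears SCTLR.M (MMU), SCTLR.C (D-cache)
	 * and SCTLR.EE (data endianness); all other bits are preserved.
	 */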
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
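	/*
	 * Tail call: __asm_invalidate_tlb_all returns to our caller via
	 * its own ret, so the ret below is never reached.
	 */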
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57-specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a57_core_errata:

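	/*
	 * S3_1_c15_c2_0 is the raw system-register encoding of the
	 * IMPLEMENTATION DEFINED CPUACTLR_EL1, used because older
	 * assemblers do not know that register by name.
	 */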
#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable the non-allocate hint for write-back no-allocate memory */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Force FPSCR write flush. Note that in some cases where a
	 * flush is unnecessary this could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring. Note that in some cases where a
	 * flush is unnecessary this could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b 0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if !defined(CONFIG_SPL_BUILD) && defined(CONFIG_IRQ)
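	/*
	 * Only the master CPU performs the one-time GIC distributor
	 * setup; every CPU then initializes its own CPU interface
	 * (GICv2) or redistributor (GICv3) below.
	 */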
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#if !defined(CONFIG_SPL_BUILD) && defined(CONFIG_IRQ)
	/*
	 * Set HCR_EL2.TGE/AMO/IMO/FMO to route exceptions to EL2
	 */
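	/* CurrentEL encodes the EL in bits [3:2], so EL2 reads as 0x8 */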
	mrs	x0, CurrentEL		/* check CurrentEL */
	cmp	x0, 0x8
	b.ne	end			/* CurrentEL != EL2 */

	mrs	x9, hcr_el2
	orr	x9, x9, #(7 << 3)	/* set HCR_EL2.AMO/IMO/FMO */
	orr	x9, x9, #(1 << 27)	/* set HCR_EL2.TGE */
	msr	hcr_el2, x9

end:
	nop
#endif /* CONFIG_IRQ */

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the spin
	 * table. This synchronization prevents slaves from observing a
	 * stale spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and, optionally, EL1.
	 */
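	/*
	 * armv8_switch_to_el2() takes the entry point for the lower EL
	 * in x4 and the target execution state (ES_TO_AARCH64) in x5.
	 */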
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary CPUs up with SGI 0 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
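	/* Reached only when neither GICv2 nor GICv3 is configured */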
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
	/* Point VBAR at the relocated vector table */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
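	/*
	 * Default (weak) implementation saves nothing. Board overrides
	 * are entered with a plain branch from reset (no usable link
	 * register), so they must end with "b save_boot_params_ret"
	 * rather than ret.
	 */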
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)