/*
 * xref: /rk3399_ARM-atf/bl31/aarch64/bl31_entrypoint.S
 * (revision c948f77136c42a92d0bb660543a3600c36dcf7f1)
 */
/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	/* Entry points exported to BL1/the platform reset logic. */
	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 in callee-saved
	 * registers (preserved across the bl calls below) for later use.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_init_sctlr=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * If PIE is enabled, fixup the Global descriptor Table and dynamic
	 * relocations
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PIE
	mov_imm	x0, BL31_BASE
	mov_imm	x1, BL31_LIMIT
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Perform platform specific early arch. setup,
	 * relaying the stashed (or zeroed) bootloader
	 * arguments in x0 - x3.
	 * ---------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_early_platform_setup2
	bl	bl31_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl31_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0		/* x1 = size of .data in bytes */
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0		/* x1 = size of .bss in bytes */
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint
126
127	/* --------------------------------------------------------------------
128	 * This CPU has been physically powered up. It is either resuming from
129	 * suspend or has simply been turned on. In both cases, call the BL31
130	 * warmboot entrypoint
131	 * --------------------------------------------------------------------
132	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off.  The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *    programming the reset address do we need to initialise SCTLR_EL3.
	 *    In other cases, we assume this has been taken care by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr			/* flags = 0: enable D-cache with MMU */
#else
	mov	x0, #DISABLE_DCACHE	/* keep D-cache off until coherency entry */
#endif
	bl	bl31_plat_enable_mmu

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0			/* preserve timestamp address across calls */

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
	mov	x20, x30		/* save LR: inv_dcache_range clobbers x30 */
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint
216