xref: /rk3399_ARM-atf/bl31/aarch64/bl31_entrypoint.S (revision 61f72a34250d063da67f4fc2b0eb8c3fda3376be)
/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
6
7#include <arch.h>
8#include <bl_common.h>
9#include <el3_common_macros.S>
10#include <pmf_asm_macros.S>
11#include <runtime_instr.h>
12#include <xlat_mmu_helpers.h>
13
14	.globl	bl31_entrypoint
15	.globl	bl31_warm_entrypoint
16
	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

23func bl31_entrypoint
24#if !RESET_TO_BL31
25	/* ---------------------------------------------------------------
26	 * Stash the previous bootloader arguments x0 - x3 for later use.
27	 * ---------------------------------------------------------------
28	 */
29	mov	x20, x0
30	mov	x21, x1
31	mov	x22, x2
32	mov	x23, x3
33
34	/* ---------------------------------------------------------------------
35	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
36	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
37	 * and primary/secondary CPU logic should not be executed in this case.
38	 *
39	 * Also, assume that the previous bootloader has already initialised the
40	 * SCTLR_EL3, including the endianness, and has initialised the memory.
41	 * ---------------------------------------------------------------------
42	 */
43	el3_entrypoint_common					\
44		_init_sctlr=0					\
45		_warm_boot_mailbox=0				\
46		_secondary_cold_boot=0				\
47		_init_memory=0					\
48		_init_c_runtime=1				\
49		_exception_vectors=runtime_exceptions
50#else
51	/* ---------------------------------------------------------------------
52	 * For RESET_TO_BL31 systems which have a programmable reset address,
53	 * bl31_entrypoint() is executed only on the cold boot path so we can
54	 * skip the warm boot mailbox mechanism.
55	 * ---------------------------------------------------------------------
56	 */
57	el3_entrypoint_common					\
58		_init_sctlr=1					\
59		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
60		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
61		_init_memory=1					\
62		_init_c_runtime=1				\
63		_exception_vectors=runtime_exceptions
64
65	/* ---------------------------------------------------------------------
66	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
67	 * there's no argument to relay from a previous bootloader. Zero the
68	 * arguments passed to the platform layer to reflect that.
69	 * ---------------------------------------------------------------------
70	 */
71	mov	x20, 0
72	mov	x21, 0
73	mov	x22, 0
74	mov	x23, 0
75#endif /* RESET_TO_BL31 */
76	/* ---------------------------------------------
77	 * Perform platform specific early arch. setup
78	 * ---------------------------------------------
79	 */
80	mov	x0, x20
81	mov	x1, x21
82	mov	x2, x22
83	mov	x3, x23
84	bl	bl31_early_platform_setup2
85	bl	bl31_plat_arch_setup
86
87	/* ---------------------------------------------
88	 * Jump to main function.
89	 * ---------------------------------------------
90	 */
91	bl	bl31_main
92
93	/* -------------------------------------------------------------
94	 * Clean the .data & .bss sections to main memory. This ensures
95	 * that any global data which was initialised by the primary CPU
96	 * is visible to secondary CPUs before they enable their data
97	 * caches and participate in coherency.
98	 * -------------------------------------------------------------
99	 */
100	adr	x0, __DATA_START__
101	adr	x1, __DATA_END__
102	sub	x1, x1, x0
103	bl	clean_dcache_range
104
105	adr	x0, __BSS_START__
106	adr	x1, __BSS_END__
107	sub	x1, x1, x0
108	bl	clean_dcache_range
109
110	b	el3_exit
111endfunc bl31_entrypoint
112
	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
119func bl31_warm_entrypoint
120#if ENABLE_RUNTIME_INSTRUMENTATION
121
122	/*
123	 * This timestamp update happens with cache off.  The next
124	 * timestamp collection will need to do cache maintenance prior
125	 * to timestamp update.
126	 */
127	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_HW_LOW_PWR
128	mrs	x1, cntpct_el0
129	str	x1, [x0]
130#endif
131
132	/*
133	 * On the warm boot path, most of the EL3 initialisations performed by
134	 * 'el3_entrypoint_common' must be skipped:
135	 *
136	 *  - Only when the platform bypasses the BL1/BL31 entrypoint by
137	 *    programming the reset address do we need to initialise SCTLR_EL3.
138	 *    In other cases, we assume this has been taken care by the
139	 *    entrypoint code.
140	 *
141	 *  - No need to determine the type of boot, we know it is a warm boot.
142	 *
143	 *  - Do not try to distinguish between primary and secondary CPUs, this
144	 *    notion only exists for a cold boot.
145	 *
146	 *  - No need to initialise the memory or the C runtime environment,
147	 *    it has been done once and for all on the cold boot path.
148	 */
149	el3_entrypoint_common					\
150		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS		\
151		_warm_boot_mailbox=0				\
152		_secondary_cold_boot=0				\
153		_init_memory=0					\
154		_init_c_runtime=0				\
155		_exception_vectors=runtime_exceptions
156
157	/*
158	 * We're about to enable MMU and participate in PSCI state coordination.
159	 *
160	 * The PSCI implementation invokes platform routines that enable CPUs to
161	 * participate in coherency. On a system where CPUs are not
162	 * cache-coherent without appropriate platform specific programming,
163	 * having caches enabled until such time might lead to coherency issues
164	 * (resulting from stale data getting speculatively fetched, among
165	 * others). Therefore we keep data caches disabled even after enabling
166	 * the MMU for such platforms.
167	 *
168	 * On systems with hardware-assisted coherency, or on single cluster
169	 * platforms, such platform specific programming is not required to
170	 * enter coherency (as CPUs already are); and there's no reason to have
171	 * caches disabled either.
172	 */
173#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
174	mov	x0, xzr
175#else
176	mov	x0, #DISABLE_DCACHE
177#endif
178	bl	bl31_plat_enable_mmu
179
180	bl	psci_warmboot_entrypoint
181
182#if ENABLE_RUNTIME_INSTRUMENTATION
183	pmf_calc_timestamp_addr rt_instr_svc RT_INSTR_EXIT_PSCI
184	mov	x19, x0
185
186	/*
187	 * Invalidate before updating timestamp to ensure previous timestamp
188	 * updates on the same cache line with caches disabled are properly
189	 * seen by the same core. Without the cache invalidate, the core might
190	 * write into a stale cache line.
191	 */
192	mov	x1, #PMF_TS_SIZE
193	mov	x20, x30
194	bl	inv_dcache_range
195	mov	x30, x20
196
197	mrs	x0, cntpct_el0
198	str	x0, [x19]
199#endif
200	b	el3_exit
201endfunc bl31_warm_entrypoint
202