xref: /rk3399_ARM-atf/bl32/sp_min/aarch32/entrypoint.S (revision 51faada71a219a8b94cd8d8e423f0f22e9da4d8f)
/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
30
31#include <arch.h>
32#include <asm_macros.S>
33#include <bl_common.h>
34#include <context.h>
35#include <el3_common_macros.S>
36#include <runtime_svc.h>
37#include <smcc_helpers.h>
38#include <smcc_macros.S>
39#include <xlat_tables_defs.h>
40
	/* Symbols exported from this file (referenced by linker scripts
	 * and other translation units). */
	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint
44
45
/*
 * Monitor-mode exception vector table. It is installed by passing it as
 * _exception_vectors to el3_entrypoint_common (see below). Only SMCs are
 * serviced; any other exception taken in monitor mode is treated as fatal
 * and routed to the platform panic handler.
 */
vector_base sp_min_vector_table
	b	sp_min_entrypoint	/* Reset: cold boot entry */
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall (SMC) */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */
55
56
/*
 * The cold boot/reset entrypoint for SP_MIN (the AArch32 BL32 image that
 * provides the EL3/monitor runtime).
 *
 * In (only meaningful when !RESET_TO_SP_MIN):
 *   r0 - pointer to a 'bl_params_t' populated by the previous bootloader
 *   r1 - pointer to a platform specific structure
 *
 * Does not return to its caller: after platform setup and sp_min_main(),
 * control leaves monitor mode through sp_min_exit.
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * Preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure & r1 with a pointer to platform
	 * specific structure
	 * ---------------------------------------------------------------
	 */
	/* Stash the bootloader arguments while the init macro runs.
	 * NOTE(review): assumes el3_entrypoint_common preserves r11/r12;
	 * the macro body is not visible here - confirm. */
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

	/* Platform hooks: early setup (receives r0/r1 set above), then
	 * architectural setup, then the C main function. */
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__		/* r0 = base address */
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0			/* r1 = size in bytes */
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__		/* r0 = base address */
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0			/* r1 = size in bytes */
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context			/* r0 = non-secure cpu_context */

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb					/* make the SCR write visible */

	/* Restore the SCTLR  */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx		/* r0 = context sp_min_exit restores */
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint
159
160
/*
 * SMC handling function for SP_MIN - target of the monitor vector table's
 * SMC slot.
 *
 * Saves the caller's GP/mode registers into an smc context, records
 * SCR.NS as the 'flags' argument, switches the world to Secure (clears
 * SCR.NS) while the service runs, rejects SMC64 function IDs with
 * SMC_UNK, then restores the saved SCR and exits through sp_min_exit
 * with r0 pointing at the smc context to restore.
 */
func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR				/* current SCR, incl. NS bit */

	/*
	 * Save SCR in stack. r1 is pushed to meet the 8 byte
	 * stack alignment requirement.
	 */
	push	{r0, r1}
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode*/
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb					/* SCR write must take effect first */
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 is issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 is detected */
	/* SMC64 is not supported here: report SMC_UNK back to the caller */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
	/* handle_runtime_svc(smc_fid, cookie, handle, flags) */
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore SCR from stack (r1 = saved SCR, r2 = alignment filler) */
	pop	{r1, r2}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc
203
204
/*
 * The warm boot entrypoint for SP_MIN, executed by a CPU resuming from a
 * power-down state. Skips the cold-boot initialisations, re-enables the
 * MMU via the platform layer, runs the warm-boot hook and then exits to
 * the non-secure world through sp_min_exit. Does not return.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *    programming the reset address do we need to set the CPU endianness.
	 *    In other cases, we assume this has been taken care by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent out of reset, having caches enabled until such time
	 * might lead to coherency issues (resulting from stale data getting
	 * speculatively fetched, among others). Therefore we keep data caches
	 * disabled while enabling the MMU, thereby forcing data accesses to
	 * have non-cacheable, nGnRnE attributes (these will always be coherent
	 * with main memory).
	 *
	 * On systems where CPUs are cache-coherent out of reset, however, PSCI
	 * need not invoke platform routines to enter coherency (as CPUs already
	 * are), and there's no reason to have caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY
	mov	r0, #0				/* no flags: D-cache may stay on */
#else
	mov	r0, #DISABLE_DCACHE		/* enable MMU with D-cache off */
#endif
	bl	bl32_plat_enable_mmu		/* r0 = platform MMU flags */

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context			/* r0 = non-secure cpu_context */

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb					/* make the SCR write visible */

	/* Restore the SCTLR  */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx		/* r0 = context sp_min_exit restores */

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint
277
/*
 * Restore the general purpose and mode registers from the SMC context and
 * perform an exception return to the mode held in the restored SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 * Does not return to the caller.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
	eret					/* leave monitor mode */
endfunc sp_min_exit
288