/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint


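/*
 * Exception vector table for Monitor mode. Each entry is a single branch
 * instruction; the table's base address is programmed into MVBAR during
 * 'el3_entrypoint_common' via the '_exception_vectors' argument below.
 */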
vector_base sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Secure Monitor Call */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */


/*
 * The cold boot/reset entrypoint for SP_MIN.
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a
	 * platform-specific structure.
	 * ---------------------------------------------------------------
	 */
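	/*
	 * Stash the arguments in r11/r12: they must survive the call to
	 * 'el3_entrypoint_common' below and are moved back into r0/r1
	 * before the platform setup calls.
	 */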
	mov	r11, r0
	mov	r12, r1

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table
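	/*
	 * Note: when the reset address is not programmable, warm-booting CPUs
	 * also enter through this entrypoint; the warm boot mailbox enabled
	 * above diverts them to the warm boot entrypoint instead.
	 */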

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run so there's no argument to relay from a previous bootloader.
	 * Zero the arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main
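	/*
	 * sp_min_main() initialises the runtime services and populates the
	 * non-secure cpu_context consumed below for the first entry into the
	 * normal world.
	 */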

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
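	/*
	 * clean_dcache_range() expects the base address in r0 and the size in
	 * bytes in r1, hence the subtractions below.
	 */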
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context
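	/* cm_get_context() returns the non-secure cpu_context pointer in r0 */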

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
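	/* r0 now holds the smc context pointer that sp_min_exit expects */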
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN.
 */
func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/* Save SCR on the stack */
	push	{r0}
	and	r3, r0, #SCR_NS_BIT		/* flags */
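	/*
	 * Bit 0 of the flags argument is the NS bit: it tells the runtime
	 * service whether the SMC originated from the non-secure world.
	 */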

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 was issued */
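	/*
	 * Per the SMC Calling Convention, bit 30 of the function ID is the
	 * calling convention bit: 0 for SMC32, 1 for SMC64. SMC64 cannot be
	 * serviced from AArch32, so it is rejected with SMC_UNK below.
	 */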
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc
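	/*
	 * handle_runtime_svc(smc_fid, cookie, handle, flags) dispatches the
	 * call to the owning runtime service and returns the pointer to the
	 * smc context to be restored on exit.
	 */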
2:
	/* r0 points to smc context */

	/* Restore SCR from the stack */
	pop	{r1}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc


/*
 * The warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *    programming the reset address do we need to set the CPU endianness.
	 *    In other cases, we assume this has been taken care of by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot; we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs; this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment;
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled.
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
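	/*
	 * The DISABLE_DCACHE flag asks the platform code to enable the MMU
	 * while keeping the data cache disabled, per the rationale above.
	 */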
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Restore the registers from the SMC context pointed to by r0 and return
 * to the mode specified in the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
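	/*
	 * Outside Hyp mode, 'eret' behaves as 'movs pc, lr': it restores the
	 * CPSR from the Monitor mode SPSR and returns to the address in the
	 * Monitor mode LR, both of which the macro above is expected to have
	 * reloaded from the smc context.
	 */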
	eret
endfunc sp_min_exit