/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint

func sp_min_vector_table
	b	sp_min_entrypoint
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* Syscall */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */
endfunc sp_min_vector_table
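
/*
 * Note: this table is programmed into both VBAR and MVBAR below, so the
 * entry at offset 0x0 doubles as the cold boot entry, while the offset
 * 0x8 slot (the SVC/SMC vector) is where Secure Monitor Calls are taken
 * when the table is used as the Monitor vector table.
 */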

func handle_smc
	smcc_save_gp_mode_regs

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/* Save SCR on the stack */
	push	{r0}
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to the secure state */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/* Check whether an SMC64 call was issued */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 detected */
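	/*
	 * SMC64 is not a valid calling convention for AArch32 callers
	 * (SMCCC function ID bit 30 set), so fail the call with SMC_UNK
	 * without invoking any runtime service handler.
	 */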
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc
2:
	/* r0 points to smc context */

	/* Restore SCR from stack */
	pop	{r1}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc

/*
 * The cold boot/reset entrypoint for SP_MIN.
 */
func sp_min_entrypoint

	/*
	 * The caches and TLBs are disabled at reset. If any implementation
	 * allows the caches/TLBs to be hit while they are disabled, ensure
	 * that they are invalidated here.
	 */

	/* Make sure we are in the secure state */
	ldcopr	r0, SCR
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb
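
	/*
	 * Monitor mode stays in the secure state regardless of SCR.NS,
	 * which is what makes it safe to flip SCR.NS below in order to
	 * reach the Non-secure banked copies of system registers.
	 */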

	/*
	 * Set sane values for the Non-secure SCTLR as well.
	 * Temporarily switch to the non-secure state for this.
	 */
	ldr	r0, =(SCTLR_RES1)
	ldcopr	r1, SCR
	orr	r2, r1, #SCR_NS_BIT
	stcopr	r2, SCR
	isb
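
	/*
	 * With SCR.NS now set (and the core still secure, courtesy of
	 * Monitor mode), the SCTLR accesses below target the Non-secure
	 * banked copy of the register.
	 */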

	ldcopr	r2, SCTLR
	orr	r0, r0, r2
	stcopr	r0, SCTLR
	isb

	stcopr	r1, SCR
	isb

	/*
	 * Set the CPU endianness before doing anything that might involve
	 * memory reads or writes.
	 */
	ldcopr	r0, SCTLR
	bic	r0, r0, #SCTLR_EE_BIT
	stcopr	r0, SCTLR
	isb
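
	/*
	 * SCTLR.EE determines the value of CPSR.E on exception entry and
	 * the endianness of translation table walks; clearing it selects
	 * little-endian for both.
	 */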

	/* Run the CPU Specific Reset handler */
	bl	reset_handler

	/*
	 * Enable the instruction cache and data access
	 * alignment checks.
	 */
	ldcopr	r0, SCTLR
	ldr	r1, =(SCTLR_RES1 | SCTLR_A_BIT | SCTLR_I_BIT)
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb
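
	/*
	 * The data cache is deliberately left disabled here: with the MMU
	 * off, all data accesses are treated as non-cacheable anyway, and
	 * the cache is only enabled later as part of the MMU setup.
	 */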

	/* Set the vector tables */
	ldr	r0, =sp_min_vector_table
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb
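
	/*
	 * MVBAR provides the vectors for exceptions taken to Monitor mode
	 * (notably SMCs), while VBAR covers the remaining secure PL1
	 * exceptions; the same table serves both here.
	 */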

	/*
	 * Set the SIF bit to disable instruction fetches
	 * from Non-secure memory.
	 */
	ldcopr	r0, SCR
	orr	r0, r0, #SCR_SIF_BIT
	stcopr	r0, SCR
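
	/*
	 * SCR.SIF is a hardening measure: with it set, the secure state
	 * cannot execute instructions fetched from Non-secure memory.
	 * The isb below synchronizes this SCR update as well.
	 */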

	/*
	 * Enable the asynchronous abort (SError) exception now that the
	 * exception vectors have been set up.
	 */
	cpsie   a
	isb

	/* Enable access to Advanced SIMD registers */
	ldcopr	r0, NSACR
	bic	r0, r0, #NSASEDIS_BIT
	orr	r0, r0, #(NASCR_CP10_BIT | NASCR_CP11_BIT)
	stcopr	r0, NSACR
	isb

	/*
	 * Enable access to Advanced SIMD, Floating point and to the Trace
	 * functionality as well.
	 */
	ldcopr	r0, CPACR
	bic	r0, r0, #ASEDIS_BIT
	bic	r0, r0, #TRCDIS_BIT
	orr	r0, r0, #CPACR_ENABLE_FP_ACCESS
	stcopr	r0, CPACR
	isb

	vmrs	r0, FPEXC
	orr	r0, r0, #FPEXC_EN_BIT
	vmsr	FPEXC, r0
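
	/*
	 * FPEXC.EN must be set before any VFP/Advanced SIMD data
	 * processing instruction executes; accesses to FPEXC itself via
	 * vmrs/vmsr are permitted regardless of the enable bit.
	 */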

	/* Detect whether Warm or Cold boot */
	bl	plat_get_my_entrypoint
	cmp	r0, #0
	/* If warm boot detected, jump to warm boot entry */
	bxne	r0
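
	/*
	 * By TF convention, plat_get_my_entrypoint returns zero on a cold
	 * boot and otherwise returns the warm boot address recorded by
	 * the platform (typically sp_min_warm_entrypoint, stashed in a
	 * mailbox during a PSCI power-down sequence).
	 */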

	/* Setup C runtime stack */
	bl	plat_set_my_stack

	/* Perform platform specific memory initialization */
	bl	platform_mem_init

	/* Initialize the C Runtime Environment */

	/*
	 * Invalidate the RW memory used by SP_MIN image. This includes
	 * the data and NOBITS sections. This is done to safeguard against
	 * possible corruption of this memory by dirty cache lines in a system
	 * cache as a result of use by an earlier boot loader stage.
	 */
	ldr	r0, =__RW_START__
	ldr	r1, =__RW_END__
	sub	r1, r1, r0
	bl	inv_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	r0, =__COHERENT_RAM_START__
	ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* Perform platform specific early setup and architectural setup */
	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context
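
	/*
	 * cm_get_context returns the cpu_context of the current CPU for
	 * the requested security state, so r0 now points at the
	 * Non-secure context being handed over to.
	 */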

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
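
	/*
	 * smc_get_next_ctx returns, in r0, the SMC context to hand over
	 * to the normal world; sp_min_exit consumes it below.
	 */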
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint

/*
 * The warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint

	/* Setup C runtime stack */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled, so
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but the D$ is
	 * disabled, so the used stack memory is
	 * guaranteed to be visible immediately after
	 * the MMU is enabled. Enabling the DCache at
	 * the same time as the MMU can lead to
	 * speculatively fetched and possibly stale
	 * stack memory being read from other caches.
	 * This can lead to coherency issues.
	 * --------------------------------------------
	 */
	mov	r0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * Function to restore the registers from the SMC context and return
 * to the mode specified in the saved SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
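	/*
	 * The eret consumes LR_mon and SPSR_mon, which the macro above
	 * should have restored from the SMC context along with the
	 * general purpose registers.
	 */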
	eret
endfunc sp_min_exit