/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl_common.h>
#include <context.h>
#include <el3_common_macros.S>
#include <runtime_svc.h>
#include <smcc_helpers.h>
#include <smcc_macros.S>
#include <xlat_tables.h>

	.globl	sp_min_vector_table
	.globl	sp_min_entrypoint
	.globl	sp_min_warm_entrypoint


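/*
 * Monitor mode exception vectors for SP_MIN. This table is installed into
 * MVBAR by 'el3_entrypoint_common' (via the '_exception_vectors' argument
 * below), so every Monitor mode exception lands here. Only SMCs are expected;
 * all other exceptions are fatal and routed to the platform panic handler.
 */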
vector_base sp_min_vector_table
	b	sp_min_entrypoint	/* Reset */
	b	plat_panic_handler	/* Undef */
	b	handle_smc		/* SMC */
	b	plat_panic_handler	/* Prefetch abort */
	b	plat_panic_handler	/* Data abort */
	b	plat_panic_handler	/* Reserved */
	b	plat_panic_handler	/* IRQ */
	b	plat_panic_handler	/* FIQ */



/*
 * The cold boot/reset entrypoint for SP_MIN.
 */
func sp_min_entrypoint
#if !RESET_TO_SP_MIN
	/* ---------------------------------------------------------------
	 * The preceding bootloader has populated r0 with a pointer to a
	 * 'bl_params_t' structure and r1 with a pointer to a platform
	 * specific structure.
	 * ---------------------------------------------------------------
	 */
	mov	r11, r0
	mov	r12, r1
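	/*
	 * The arguments are stashed in r11 and r12 because the
	 * 'el3_entrypoint_common' invocation below is expected to leave
	 * these registers untouched; they are handed back to the platform
	 * layer once the early initialisation is done.
	 */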

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_SP_MIN systems, only the primary CPU ever reaches
	 * sp_min_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already set up the CPU
	 * endianness and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=0					\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

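	/*
	 * At this point 'el3_entrypoint_common' has installed the exception
	 * vectors and, since _init_c_runtime=1, initialised the C runtime
	 * (.bss zeroed, a stack set up for this CPU).
	 */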
	/* ---------------------------------------------------------------------
	 * Relay the previous bootloader's arguments to the platform layer
	 * ---------------------------------------------------------------------
	 */
	mov	r0, r11
	mov	r1, r12
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems which have a programmable reset address,
	 * sp_min_entrypoint() is executed only on the cold boot path, so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common					\
		_set_endian=1					\
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
		_init_memory=1					\
		_init_c_runtime=1				\
		_exception_vectors=sp_min_vector_table

	/* ---------------------------------------------------------------------
	 * For RESET_TO_SP_MIN systems, BL32 (SP_MIN) is the first bootloader
	 * to run, so there are no arguments to relay from a previous
	 * bootloader. Zero the arguments passed to the platform layer to
	 * reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	r0, #0
	mov	r1, #0
#endif /* RESET_TO_SP_MIN */

	bl	sp_min_early_platform_setup
	bl	sp_min_plat_arch_setup

	/* Jump to the main function */
	bl	sp_min_main
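
	/*
	 * At this point sp_min_main() (C code) has performed the remaining
	 * cold boot initialisation: platform setup, registration of the
	 * runtime services (PSCI among them) and preparation of the
	 * non-secure context for the next image.
	 */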

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	ldr	r0, =__DATA_START__
	ldr	r1, =__DATA_END__
	sub	r1, r1, r0		/* r1 = size of .data in bytes */
	bl	clean_dcache_range

	ldr	r0, =__BSS_START__
	ldr	r1, =__BSS_END__
	sub	r1, r1, r0		/* r1 = size of .bss in bytes */
	bl	clean_dcache_range

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context
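
	/*
	 * cm_get_context(NON_SECURE) returns, in r0, a pointer to this
	 * CPU's non-secure cpu_context, from which the world-switch
	 * registers are restored below.
	 */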

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx
	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_entrypoint


/*
 * SMC handling function for SP_MIN. It saves the caller's register state,
 * dispatches the call to the runtime services framework and returns to the
 * caller via sp_min_exit().
 */
func handle_smc
	smcc_save_gp_mode_regs
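
	/*
	 * 'smcc_save_gp_mode_regs' saves the caller's general purpose
	 * registers and the banked SP/LR of the other AArch32 modes into
	 * the per-CPU SMC context.
	 */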

	/* r0 points to smc_context */
	mov	r2, r0				/* handle */
	ldcopr	r0, SCR

	/*
	 * Save SCR on the stack. r1 is pushed as well to meet the 8-byte
	 * stack alignment requirement.
	 */
	push	{r0, r1}
	and	r3, r0, #SCR_NS_BIT		/* flags */

	/* Switch to Secure Mode */
	bic	r0, #SCR_NS_BIT
	stcopr	r0, SCR
	isb
	ldr	r0, [r2, #SMC_CTX_GPREG_R0]	/* smc_fid */
	/*
	 * Check whether an SMC64 is issued; SMC64 calls are not allowed
	 * from AArch32 callers, so they are answered with SMC_UNK.
	 */
	tst	r0, #(FUNCID_CC_MASK << FUNCID_CC_SHIFT)
	beq	1f	/* SMC32 is detected */
	mov	r0, #SMC_UNK
	str	r0, [r2, #SMC_CTX_GPREG_R0]
	mov	r0, r2
	b	2f	/* Skip handling the SMC */
1:
	/*
	 * Call the runtime services framework with r0 = smc_fid,
	 * r1 = cookie, r2 = handle (SMC context) and r3 = flags.
	 */
	mov	r1, #0				/* cookie */
	bl	handle_runtime_svc
2:
	/* r0 points to the smc context to resume */

	/* Restore SCR from the stack */
	pop	{r1, r2}
	stcopr	r1, SCR
	isb

	b	sp_min_exit
endfunc handle_smc


/*
 * The warm boot entrypoint for SP_MIN.
 */
func sp_min_warm_entrypoint
	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 *  - Only when the platform bypasses the BL1/BL32 (SP_MIN) entrypoint by
	 *    programming the reset address do we need to set the CPU endianness.
	 *    In other cases, we assume this has been taken care of by the
	 *    entrypoint code.
	 *
	 *  - No need to determine the type of boot, we know it is a warm boot.
	 *
	 *  - Do not try to distinguish between primary and secondary CPUs, this
	 *    notion only exists for a cold boot.
	 *
	 *  - No need to initialise the memory or the C runtime environment,
	 *    it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common					\
		_set_endian=PROGRAMMABLE_RESET_ADDRESS		\
		_warm_boot_mailbox=0				\
		_secondary_cold_boot=0				\
		_init_memory=0					\
		_init_c_runtime=0				\
		_exception_vectors=sp_min_vector_table

	/*
	 * We're about to enable the MMU and participate in PSCI state
	 * coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent out of reset, having caches enabled until such time
	 * might lead to coherency issues (resulting from stale data getting
	 * speculatively fetched, among others). Therefore we keep data caches
	 * disabled while enabling the MMU, thereby forcing data accesses to
	 * have non-cacheable, nGnRnE attributes (these will always be coherent
	 * with main memory).
	 *
	 * On systems where CPUs are cache-coherent out of reset, however, PSCI
	 * need not invoke platform routines to enter coherency (as CPUs already
	 * are), and there's no reason to have caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY
	mov	r0, #0
#else
	mov	r0, #DISABLE_DCACHE
#endif
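	/*
	 * r0 now holds the MMU flags. The default bl32_plat_enable_mmu()
	 * implementation forwards them to enable_mmu_secure(); platforms
	 * may override it.
	 */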
	bl	bl32_plat_enable_mmu

	bl	sp_min_warm_boot

	/* Program the registers in cpu_context and exit monitor mode */
	mov	r0, #NON_SECURE
	bl	cm_get_context

	/* Restore the SCR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_SCR]
	stcopr	r2, SCR
	isb

	/* Restore the SCTLR */
	ldr	r2, [r0, #CTX_REGS_OFFSET + CTX_NS_SCTLR]
	stcopr	r2, SCTLR

	bl	smc_get_next_ctx

	/* The other cpu_context registers have been copied to smc context */
	b	sp_min_exit
endfunc sp_min_warm_entrypoint

/*
 * The function to restore the registers from the SMC context and return
 * to the mode specified in the SPSR.
 *
 * Arguments : r0 must point to the SMC context to restore from.
 */
func sp_min_exit
	smcc_restore_gp_mode_regs
	/*
	 * In Monitor mode 'eret' behaves as 'movs pc, lr': it restores the
	 * CPSR from SPSR_mon and branches to LR_mon, both of which have just
	 * been reloaded from the SMC context.
	 */
	eret
endfunc sp_min_exit