xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision 6cf89021312a33395f804d80377a6ffdaadbbe21)
17c88f3f6SAchin Gupta/*
27c88f3f6SAchin Gupta * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
37c88f3f6SAchin Gupta *
47c88f3f6SAchin Gupta * Redistribution and use in source and binary forms, with or without
57c88f3f6SAchin Gupta * modification, are permitted provided that the following conditions are met:
67c88f3f6SAchin Gupta *
77c88f3f6SAchin Gupta * Redistributions of source code must retain the above copyright notice, this
87c88f3f6SAchin Gupta * list of conditions and the following disclaimer.
97c88f3f6SAchin Gupta *
107c88f3f6SAchin Gupta * Redistributions in binary form must reproduce the above copyright notice,
117c88f3f6SAchin Gupta * this list of conditions and the following disclaimer in the documentation
127c88f3f6SAchin Gupta * and/or other materials provided with the distribution.
137c88f3f6SAchin Gupta *
147c88f3f6SAchin Gupta * Neither the name of ARM nor the names of its contributors may be used
157c88f3f6SAchin Gupta * to endorse or promote products derived from this software without specific
167c88f3f6SAchin Gupta * prior written permission.
177c88f3f6SAchin Gupta *
187c88f3f6SAchin Gupta * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
197c88f3f6SAchin Gupta * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
207c88f3f6SAchin Gupta * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
217c88f3f6SAchin Gupta * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
227c88f3f6SAchin Gupta * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
237c88f3f6SAchin Gupta * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
247c88f3f6SAchin Gupta * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
257c88f3f6SAchin Gupta * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
267c88f3f6SAchin Gupta * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
277c88f3f6SAchin Gupta * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
287c88f3f6SAchin Gupta * POSSIBILITY OF SUCH DAMAGE.
297c88f3f6SAchin Gupta */
307c88f3f6SAchin Gupta
317c88f3f6SAchin Gupta#include <arch.h>
320a30cf54SAndrew Thoelke#include <asm_macros.S>
3397043ac9SDan Handley#include <tsp.h>
347c88f3f6SAchin Gupta
357c88f3f6SAchin Gupta
	/* ---------------------------------------------
	 * Entry points exported to the TSP dispatcher
	 * (TSPD) running in EL3. The TSPD vectors
	 * execution here for boot, PSCI power events,
	 * fast SMC requests and S-EL1 FIQ handling.
	 * ---------------------------------------------
	 */
	.globl	tsp_entrypoint
	.globl	tsp_cpu_on_entry
	.globl	tsp_cpu_off_entry
	.globl	tsp_cpu_suspend_entry
	.globl	tsp_cpu_resume_entry
	.globl	tsp_fast_smc_entry
	.globl	tsp_fiq_entry
437c88f3f6SAchin Gupta
	/* ---------------------------------------------
	 * Populate the SMC return values in x0-x7 from
	 * the tsp_args structure pointed to by x0, then
	 * issue an SMC to hand control back to the TSPD.
	 * The arguments are loaded highest-pair first so
	 * that x0 (the structure pointer) is only
	 * overwritten by the final load.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]	/* clobbers the pointer; must be last */
	smc	#0
	.endm
567c88f3f6SAchin Gupta
	/* ---------------------------------------------
	 * Save the S-EL1 exception-return state
	 * (ELR_EL1, SPSR_EL1) together with x30 (LR) and
	 * x18 (AAPCS64 platform register) on the stack,
	 * so that C code can be entered and an eret
	 * performed later. \reg1/\reg2 are caller-chosen
	 * scratch registers. Two 16-byte pushes keep sp
	 * 16-byte aligned. Layout must mirror
	 * restore_eret_context exactly.
	 * ---------------------------------------------
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm
63*6cf89021SAchin Gupta
	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18 and
	 * the saved ELR_EL1/SPSR_EL1 values (in that
	 * order, mirroring the pushes) and write them
	 * back to the system registers in preparation
	 * for an eret. \reg1/\reg2 are scratch.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
70*6cf89021SAchin Gupta
	/* Code section, 8-byte (2^3) aligned */
	.section	.text, "ax"
	.align 3
737c88f3f6SAchin Gupta
func tsp_entrypoint
	/* ---------------------------------------------
	 * Cold-boot entry point for the TSP. Runs with
	 * caches/MMU in whatever state the loader left
	 * them. Sets up vectors, I-cache, BSS, stacks
	 * and the platform, calls tsp_main(), then
	 * reports completion to the TSPD via an SMC
	 * with x0 = TSP_ENTRY_DONE and x1 = tsp_main's
	 * return value.
	 * ---------------------------------------------
	 */

	/* ---------------------------------------------
	 * The entrypoint is expected to be executed
	 * only by the primary cpu (at least for now).
	 * So, make sure no secondary has lost its way.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_is_primary_cpu
	cbz	x0, tsp_entrypoint_panic	/* non-primary cpu: spin forever */

	/* ---------------------------------------------
	 * Set the exception vector to something sane
	 * before anything below can fault.
	 * ---------------------------------------------
	 */
	adr	x0, early_exceptions
	msr	vbar_el1, x0

	/* ---------------------------------------------
	 * Enable the instruction cache.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_I_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * Must happen before any C code runs.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem16

	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem16

	/* --------------------------------------------
	 * Give ourselves a small coherent stack to
	 * ease the pain of initializing the MMU
	 * (usable while data caching is not yet safe).
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	bl32_early_platform_setup
	bl	bl32_plat_arch_setup

	/* ---------------------------------------------
	 * Give ourselves a stack allocated in Normal
	 * -IS-WBWA memory, now that the MMU is set up.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising;
	 * pass tsp_main's result along in x1.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* Should never reach here: park the cpu */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
1567c88f3f6SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request
	 * via the SMC issued by restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc
1707c88f3f6SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * Control is handed back to the TSPD via the
	 * SMC issued by restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane
	 * before anything below can fault.
	 * ---------------------------------------------
	 */
	adr	x0, early_exceptions
	msr	vbar_el1, x0

	/* ---------------------------------------------
	 * Enable the instruction cache.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_I_BIT
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a small coherent stack to
	 * ease the pain of initializing the MMU
	 * (usable while data caching is not yet safe).
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_coherent_stack

	/* ---------------------------------------------
	 * Initialise the MMU
	 * ---------------------------------------------
	 */
	bl	enable_mmu_el1

	/* ---------------------------------------------
	 * Give ourselves a stack allocated in Normal
	 * -IS-WBWA memory, now that the MMU is on.
	 * ---------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main		/* returns pointer to SMC args in x0 */
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
2327c88f3f6SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state. Control returns to the TSPD
	 * via the SMC issued by restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc
2457c88f3f6SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for handling a pending S-EL1 FIQ.
	 * 'x0' contains a magic number which indicates
	 * this. TSPD expects control to be handed back
	 * at the end of FIQ processing. This is done
	 * through an SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_FIQ
	 * ---------------------------------------------
	 */
func	tsp_fiq_entry
#if DEBUG
	/* Sanity-check the magic value passed by the
	 * TSPD in x0. The constant is composed with a
	 * mov/movk pair (high then low 16 bits). */
	mov	x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
	movk	x2, #(TSP_HANDLE_FIQ_AND_RETURN &  0xffff)
	cmp	x0, x2
	b.ne	tsp_fiq_entry_panic
#endif
	/*---------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous IRQ. Update statistics and
	 * handle the FIQ before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. A non-zero return value from the
	 * fiq handler is an error.
	 * ---------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_fiq_stats
	bl	tsp_fiq_handler
	cbnz	x0, tsp_fiq_entry_panic	/* handler failure is fatal */
	restore_eret_context x2 x3
	/* Report successful handling back to the TSPD
	 * (constant built with a mov/movk pair) */
	mov	x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
	movk	x0, #(TSP_HANDLED_S_EL1_FIQ &  0xffff)
	smc	#0

	/* Bad magic or handler error: park the cpu */
tsp_fiq_entry_panic:
	b	tsp_fiq_entry_panic
297*6cf89021SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request via the SMC issued
	 * by restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main	/* returns pointer to SMC args in x0 */
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_resume_panic:
	b	tsp_cpu_resume_panic
3137c88f3f6SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request. The
	 * result is handed back to the TSPD by the SMC
	 * issued in restore_args_call_smc.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_fast_smc_handler	/* returns pointer to SMC args in x0 */
	restore_args_call_smc

	/* Should never reach here */
tsp_fast_smc_entry_panic:
	b	tsp_fast_smc_entry_panic
3247c88f3f6SAchin Gupta
325