/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables_defs.h>
#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table

	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0 and then issue
	 * an SMC to hand control back to the TSPD. x0 is
	 * loaded last as it holds the pointer to the
	 * args structure itself.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
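
	/* ---------------------------------------------
	 * Helper macros to save and restore the context
	 * needed for an exception return from S-EL1:
	 * ELR_EL1 and SPSR_EL1, along with x30 and x18.
	 * They are used on the S-EL1 interrupt path
	 * below so that a preempted exception return
	 * context survives the excursion into C code
	 * and the SMC back to EL3.
	 * ---------------------------------------------
	 */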
317c88f3f6SAchin Gupta
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

func tsp_entrypoint _align=3
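	/* _align=3 makes the entrypoint 2^3 = 8-byte aligned */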

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0	/* x1 = size of the RW region */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0	/* x1 = tsp_main's return value */
	mov	x0, #TSP_ENTRY_DONE
	smc	#0
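
	/* The TSPD should not hand control back here after the TSP_ENTRY_DONE SMC */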
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
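	 * The order of these entries must stay in sync
	 * with the tsp_vectors structure that the TSPD
	 * uses to invoke them.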
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
endfunc tsp_vector_table

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * necessary bookkeeping. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. set up
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. caches and MMU
	 * will be turned off, the execution state
	 * will be AArch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled.
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines resident
	 * in other caches. We assume that interconnect
	 * coherency has been enabled for this cluster by
	 * EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * necessary bookkeeping. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling a S-EL1
	 * Interrupt which was triggered while executing
	 * in normal world. 'x0' contains a magic number
	 * which indicates this. TSPD expects control to
	 * be handed back at the end of interrupt
	 * processing. This is done through an SMC.
	 * The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that would
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled, or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases, switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	/* Unmask IRQ/FIQ so the SMC can be preempted */
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	/* Mask IRQ/FIQ again before returning to the TSPD */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
	 * SMC. It could be on behalf of the non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the pre-empted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the pre-empted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_abort_yield_smc_entry