/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl  tsp_vector_table



	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
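
	/*
	 * In the macro above, x0 doubles as the pointer to the smc args
	 * structure, so it is deliberately loaded last. The TSP_ARGn
	 * offsets index consecutive 64-bit words of that structure.
	 */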

	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
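
	/*
	 * These two macros stash and recover ELR_EL1, SPSR_EL1, x30 and
	 * x18 on the stack. They bracket the S-EL1 interrupt handling
	 * further below so that exception return state from an earlier,
	 * preempted context survives the nested handling.
	 */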

func tsp_entrypoint _align=3

#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table only
		 * once, during the primary core's cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label, which lies within
		 * the first page.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
		ldr	x0, =pie_fixup
		and	x0, x0, #~(PAGE_SIZE - 1)
		mov_imm	x1, (BL32_LIMIT - BL32_BASE)
		add	x1, x1, x0
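		/*
		 * At this point x0 holds the page-aligned runtime base of
		 * the image and x1 its runtime end address; these bound the
		 * region that fixup_gdt_reloc relocates.
		 */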
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT
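	/* Writing DAIF_ABT_BIT to daifclr clears PSTATE.A, unmasking SError */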

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb
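	/*
	 * SCTLR_I_BIT enables the instruction cache; SCTLR_A_BIT and
	 * SCTLR_SA_BIT enable data access and stack pointer alignment
	 * checking. Clearing SCTLR_DSSBS_BIT makes PSTATE.SSBS default
	 * to 0 on exception entry to EL1, which is what disables the
	 * speculative loads mentioned above.
	 */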

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
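	/* inv_dcache_range takes the base address in x0 and the size in x1 */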
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack
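	/* plat_set_my_stack points SP at this CPU's platform-defined stack */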

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0
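	/*
	 * x0 carries the TSP_ENTRY_DONE function id and x1 the value
	 * returned by tsp_main (expected to be the address of
	 * tsp_vector_table), which the TSPD records for subsequent
	 * entries into the TSP.
	 */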

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
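
	/*
	 * Each entry above is a single branch instruction; the order of
	 * the entries is assumed to match the tsp_vectors structure that
	 * the TSPD uses to index into this table.
	 */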

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. set up
	 * the MMU, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. caches and the
	 * MMU will be turned off, the execution state
	 * will be AArch64 and exceptions will be masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0
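	/* x0 = 0 is the flags argument passed to bl32_plat_enable_mmu */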
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. The TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF bits are set upon entry. 'x1'
	 *    has the ELR_EL3 from the non-secure state.
	 * 2. The TSP has to preserve the callee-saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. The TSP has to preserve the system and VFP
	 *    registers (if applicable).
	 * 4. The TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. The TSP returns to the TSPD using an SMC
	 *    with 'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQs/FIQs are not enabled since that would
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases, switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
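	/*
	 * x2/x3 are only temporaries here; the macro uses them to store
	 * ELR_EL1/SPSR_EL1, together with x30/x18, on the stack.
	 */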
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast SMC request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
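	/*
	 * IRQs/FIQs are unmasked here so that the Yielding SMC can be
	 * preempted by an interrupt while tsp_smc_handler runs; they are
	 * masked again below before handing control back to the TSPD.
	 */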
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Yielding
	 * SMC. It could be on behalf of the non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook, so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup, such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry