xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision 9fc59639e649f614318f78ae2ca103fe102405ec)
/*
 * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table


	/* ---------------------------------------------
	 * Populate the SMC arguments in x0-x7 from the
	 * args structure pointed to by x0, then issue
	 * the SMC.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm

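	/* ---------------------------------------------
	 * Save/restore any live exception return state
	 * (ELR_EL1 and SPSR_EL1) along with x30 and x18
	 * on the stack, around the synchronous S-EL1
	 * interrupt handling in tsp_sel1_intr_entry.
	 * ---------------------------------------------
	 */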
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

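	/* ---------------------------------------------
	 * TSP entrypoint for cold boot, reached on the
	 * primary CPU only (see the stack allocation
	 * comment below). Later entries into the TSP
	 * are made by the TSPD through the
	 * tsp_vector_table entries further down.
	 * ---------------------------------------------
	 */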
func tsp_entrypoint _align=3

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks, and
	 * disable speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

	/* ---------------------------------------------
	 * Enable pointer authentication
	 * ---------------------------------------------
	 */
#if ENABLE_PAUTH
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_EnIA_BIT
#if ENABLE_BTI
	/* ---------------------------------------------
	 * Enable PAC branch type compatibility
	 * ---------------------------------------------
	 */
	bic	x0, x0, #(SCTLR_BT0_BIT | SCTLR_BT1_BIT)
#endif	/* ENABLE_BTI */
	msr	sctlr_el1, x0
	isb
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0			/* x1 = return value of tsp_main */
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

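	/* Should never reach here */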
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
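	/* Note: the order of these entries is part of the contract with
	 * the TSPD and is expected to match the tsp_vectors_t structure
	 * defined in tsp.h.
	 */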
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * PSCI call to ask the TSP to perform any
	 * necessary bookkeeping. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF PSCI call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET PSCI call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON PSCI call to
	 * ask the TSP to initialise itself i.e. set up
	 * the MMU, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. caches and the
	 * MMU will be turned off, the execution state
	 * will be AArch64 and exceptions will be
	 * masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
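	/* x0 holds the 'flags' argument for bl32_plat_enable_mmu; 0 requests the default setup */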
	mov	x0, #0
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * PSCI call to ask the TSP to perform any
	 * necessary bookkeeping. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for synchronously handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. The TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func	tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND PSCI call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast SMC request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* Allow preemption */
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT	/* Mask IRQ/FIQ again */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Yielding
	 * SMC. It could be on behalf of the non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry