/* xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision 0c8d4fef28768233f1f46b4d085f904293dffd2c) */
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables.h>


	.globl	tsp_entrypoint
	.globl	tsp_vector_table


	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then return
	 * control to the TSPD via an SMC. Control only
	 * comes back to the TSP through one of its
	 * entrypoints, so no code should follow this
	 * macro on the fall-through path.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	/* x0 is the base pointer, so it must be overwritten last */
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm

	/* ---------------------------------------------
	 * Save the S-EL1 exception return state
	 * (ELR_EL1, SPSR_EL1) plus x30 (LR) and x18 on
	 * the current stack, using \reg1/\reg2 as
	 * scratch. Must be balanced by a matching
	 * restore_eret_context before the next eret.
	 * NOTE(review): x18 is presumably preserved as
	 * the platform register — confirm against the
	 * TSP/TSPD handover contract.
	 * ---------------------------------------------
	 */
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	/* ---------------------------------------------
	 * Undo save_eret_context: pop x30/x18 and the
	 * ELR_EL1/SPSR_EL1 pair (via \reg1/\reg2) in
	 * the reverse order they were pushed, and write
	 * the pair back to the system registers.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

	.section	.text, "ax"
	.align 3

	/* ---------------------------------------------
	 * Cold-boot entrypoint of the TSP on the
	 * primary cpu: install exception vectors,
	 * unmask SError, enable I-cache and alignment
	 * checks, zero NOBITS sections, set up a stack,
	 * run platform setup and hand off to tsp_main.
	 * Reports completion to the TSPD with an SMC.
	 * ---------------------------------------------
	 */
func tsp_entrypoint

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb			/* ensure the new vectors are in effect */

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem16

	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem16

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1		/* stack selection is keyed on this cpu's MPIDR */
	bl	platform_set_stack

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	bl32_early_platform_setup
	bl	bl32_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * x1 carries tsp_main's return value; control
	 * is not expected to return from this SMC.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* Unreachable unless the SMC unexpectedly returns */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE: the order of these branches is the ABI
	 * shared with the TSPD — it indexes into this
	 * table by fixed offset, so do NOT reorder or
	 * insert entries.
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_std_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_fiq_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	/* Hand the result back to the TSPD; does not fall through */
	restore_args_call_smc

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1		/* per-cpu stack selected by MPIDR */
	bl	platform_set_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines resi-
	 * -dent in other caches. We assume that
	 * interconnect coherency has been enabled for
	 * this cluster by EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	/* Return the result to the TSPD via SMC; does not fall through */
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	/* Acknowledge the request back to the TSPD; does not fall through */
	restore_args_call_smc

2827c88f3f6SAchin Gupta	/*---------------------------------------------
2836cf89021SAchin Gupta	 * This entrypoint is used by the TSPD to pass
2846cf89021SAchin Gupta	 * control for handling a pending S-EL1 FIQ.
2856cf89021SAchin Gupta	 * 'x0' contains a magic number which indicates
2866cf89021SAchin Gupta	 * this. TSPD expects control to be handed back
2876cf89021SAchin Gupta	 * at the end of FIQ processing. This is done
2886cf89021SAchin Gupta	 * through an SMC. The handover agreement is:
2896cf89021SAchin Gupta	 *
2906cf89021SAchin Gupta	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
2916cf89021SAchin Gupta	 *    the ELR_EL3 from the non-secure state.
2926cf89021SAchin Gupta	 * 2. TSP has to preserve the callee saved
2936cf89021SAchin Gupta	 *    general purpose registers, SP_EL1/EL0 and
2946cf89021SAchin Gupta	 *    LR.
2956cf89021SAchin Gupta	 * 3. TSP has to preserve the system and vfp
2966cf89021SAchin Gupta	 *    registers (if applicable).
2976cf89021SAchin Gupta	 * 4. TSP can use 'x0-x18' to enable its C
2986cf89021SAchin Gupta	 *    runtime.
2996cf89021SAchin Gupta	 * 5. TSP returns to TSPD using an SMC with
3006cf89021SAchin Gupta	 *    'x0' = TSP_HANDLED_S_EL1_FIQ
3016cf89021SAchin Gupta	 * ---------------------------------------------
3026cf89021SAchin Gupta	 */
3036cf89021SAchin Guptafunc	tsp_fiq_entry
3046cf89021SAchin Gupta#if DEBUG
3056cf89021SAchin Gupta	mov	x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
3066cf89021SAchin Gupta	movk	x2, #(TSP_HANDLE_FIQ_AND_RETURN &  0xffff)
3076cf89021SAchin Gupta	cmp	x0, x2
3086cf89021SAchin Gupta	b.ne	tsp_fiq_entry_panic
3096cf89021SAchin Gupta#endif
3106cf89021SAchin Gupta	/*---------------------------------------------
3116cf89021SAchin Gupta	 * Save any previous context needed to perform
3126cf89021SAchin Gupta	 * an exception return from S-EL1 e.g. context
3136cf89021SAchin Gupta	 * from a previous IRQ. Update statistics and
3146cf89021SAchin Gupta	 * handle the FIQ before returning to the TSPD.
3156cf89021SAchin Gupta	 * IRQ/FIQs are not enabled since that will
3166cf89021SAchin Gupta	 * complicate the implementation. Execution
3176cf89021SAchin Gupta	 * will be transferred back to the normal world
3186cf89021SAchin Gupta	 * in any case. A non-zero return value from the
3196cf89021SAchin Gupta	 * fiq handler is an error.
3206cf89021SAchin Gupta	 * ---------------------------------------------
3216cf89021SAchin Gupta	 */
3226cf89021SAchin Gupta	save_eret_context x2 x3
3236cf89021SAchin Gupta	bl	tsp_update_sync_fiq_stats
3246cf89021SAchin Gupta	bl	tsp_fiq_handler
3256cf89021SAchin Gupta	cbnz	x0, tsp_fiq_entry_panic
3266cf89021SAchin Gupta	restore_eret_context x2 x3
3276cf89021SAchin Gupta	mov	x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
3286cf89021SAchin Gupta	movk	x0, #(TSP_HANDLED_S_EL1_FIQ &  0xffff)
3296cf89021SAchin Gupta	smc	#0
3306cf89021SAchin Gupta
3316cf89021SAchin Guptatsp_fiq_entry_panic:
3326cf89021SAchin Gupta	b	tsp_fiq_entry_panic
3336cf89021SAchin Gupta
	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	/* Hand the result back to the TSPD; does not fall through */
	restore_args_call_smc
tsp_cpu_resume_panic:
	b	tsp_cpu_resume_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request. The
	 * handler runs with interrupts masked (no
	 * preemption), unlike the std smc path.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	/* Return the result to the TSPD; does not fall through */
	restore_args_call_smc
tsp_fast_smc_entry_panic:
	b	tsp_fast_smc_entry_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a std smc request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_std_smc_entry
	/* Unmask IRQ/FIQ so the handler is preemptible */
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	/* Re-mask IRQ/FIQ before returning to the TSPD */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc
tsp_std_smc_entry_panic:
	b	tsp_std_smc_entry_panic
