xref: /rk3399_ARM-atf/bl32/tsp/aarch64/tsp_entrypoint.S (revision 51faada71a219a8b94cd8d8e423f0f22e9da4d8f)
17c88f3f6SAchin Gupta/*
2308d359bSDouglas Raillard * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
37c88f3f6SAchin Gupta *
47c88f3f6SAchin Gupta * Redistribution and use in source and binary forms, with or without
57c88f3f6SAchin Gupta * modification, are permitted provided that the following conditions are met:
67c88f3f6SAchin Gupta *
77c88f3f6SAchin Gupta * Redistributions of source code must retain the above copyright notice, this
87c88f3f6SAchin Gupta * list of conditions and the following disclaimer.
97c88f3f6SAchin Gupta *
107c88f3f6SAchin Gupta * Redistributions in binary form must reproduce the above copyright notice,
117c88f3f6SAchin Gupta * this list of conditions and the following disclaimer in the documentation
127c88f3f6SAchin Gupta * and/or other materials provided with the distribution.
137c88f3f6SAchin Gupta *
147c88f3f6SAchin Gupta * Neither the name of ARM nor the names of its contributors may be used
157c88f3f6SAchin Gupta * to endorse or promote products derived from this software without specific
167c88f3f6SAchin Gupta * prior written permission.
177c88f3f6SAchin Gupta *
187c88f3f6SAchin Gupta * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
197c88f3f6SAchin Gupta * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
207c88f3f6SAchin Gupta * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
217c88f3f6SAchin Gupta * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
227c88f3f6SAchin Gupta * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
237c88f3f6SAchin Gupta * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
247c88f3f6SAchin Gupta * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
257c88f3f6SAchin Gupta * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
267c88f3f6SAchin Gupta * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
277c88f3f6SAchin Gupta * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
287c88f3f6SAchin Gupta * POSSIBILITY OF SUCH DAMAGE.
297c88f3f6SAchin Gupta */
307c88f3f6SAchin Gupta
317c88f3f6SAchin Gupta#include <arch.h>
320a30cf54SAndrew Thoelke#include <asm_macros.S>
3397043ac9SDan Handley#include <tsp.h>
34d50ece03SAntonio Nino Diaz#include <xlat_tables_defs.h>
35da0af78aSDan Handley#include "../tsp_private.h"
367c88f3f6SAchin Gupta
377c88f3f6SAchin Gupta
387c88f3f6SAchin Gupta	.globl	tsp_entrypoint
39399fb08fSAndrew Thoelke	.globl  tsp_vector_table
407c88f3f6SAchin Gupta
41239b04faSSoby Mathew
42239b04faSSoby Mathew
	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * Note: x0 is deliberately reloaded LAST, since
	 * it holds the base pointer used by the three
	 * preceding ldp instructions. The macro does not
	 * return: it transfers control to EL3 via SMC.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm
557c88f3f6SAchin Gupta
	.macro	save_eret_context reg1 reg2
	/*
	 * Preserve the S-EL1 exception-return state (ELR_EL1/SPSR_EL1)
	 * plus x30 (LR) and x18 on the stack. Both pushes are 16 bytes,
	 * keeping sp 16-byte aligned as AArch64 requires for sp-based
	 * accesses. NOTE(review): x18 is the AAPCS64 platform register -
	 * presumably saved here so C code reached afterwards may clobber
	 * it; confirm against the TSPD handover contract.
	 */
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm
626cf89021SAchin Gupta
	.macro restore_eret_context reg1 reg2
	/*
	 * Exact inverse of save_eret_context: pop x30/x18, then pop and
	 * reinstate ELR_EL1/SPSR_EL1 so a subsequent eret (or the SMC
	 * return path) resumes the interrupted S-EL1 context.
	 */
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
696cf89021SAchin Gupta
706cf89021SAchin Gupta	.section	.text, "ax"
716cf89021SAchin Gupta	.align 3
727c88f3f6SAchin Gupta
func tsp_entrypoint
	/*
	 * Cold-boot entry for the TSP on the primary CPU. Entered from
	 * EL3 with caches/MMU state as set up by the boot firmware.
	 * Sequence: vectors -> SError unmask -> SCTLR hardening ->
	 * cache invalidation of RW memory -> BSS/coherent-RAM zeroing ->
	 * stack -> (optional) stack canary -> platform setup -> tsp_main.
	 * Ends by reporting TSP_ENTRY_DONE to the TSPD via SMC; x0 from
	 * tsp_main is forwarded in x1.
	 */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0	/* x1 = size of RW region in bytes */
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem

#if USE_COHERENT_MEM
	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0	/* forward tsp_main's result as SMC arg1 */
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

	/* Unreachable unless the SMC above returns unexpectedly */
tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint
1757c88f3f6SAchin Gupta
176399fb08fSAndrew Thoelke
177399fb08fSAndrew Thoelke	/* -------------------------------------------
178399fb08fSAndrew Thoelke	 * Table of entrypoint vectors provided to the
179399fb08fSAndrew Thoelke	 * TSPD for the various entrypoints
180399fb08fSAndrew Thoelke	 * -------------------------------------------
181399fb08fSAndrew Thoelke	 */
func tsp_vector_table
	/*
	 * Each entry is a single 4-byte branch, so the TSPD can index
	 * this table by fixed offsets. NOTE(review): the entry ORDER is
	 * an ABI with the TSPD-side vector structure - do not reorder;
	 * confirm against the structure declared in tsp.h.
	 */
	b	tsp_std_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_std_smc_entry
endfunc tsp_vector_table
194399fb08fSAndrew Thoelke
1957c88f3f6SAchin Gupta	/*---------------------------------------------
1967c88f3f6SAchin Gupta	 * This entrypoint is used by the TSPD when this
1977c88f3f6SAchin Gupta	 * cpu is to be turned off through a CPU_OFF
1987c88f3f6SAchin Gupta	 * psci call to ask the TSP to perform any
1997c88f3f6SAchin Gupta	 * bookeeping necessary. In the current
2007c88f3f6SAchin Gupta	 * implementation, the TSPD expects the TSP to
2017c88f3f6SAchin Gupta	 * re-initialise its state so nothing is done
2027c88f3f6SAchin Gupta	 * here except for acknowledging the request.
2037c88f3f6SAchin Gupta	 * ---------------------------------------------
2047c88f3f6SAchin Gupta	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	/* x0 = smc-args pointer returned by tsp_cpu_off_main */
	restore_args_call_smc
endfunc tsp_cpu_off_entry
2097c88f3f6SAchin Gupta
2107c88f3f6SAchin Gupta	/*---------------------------------------------
211d5f13093SJuan Castillo	 * This entrypoint is used by the TSPD when the
212d5f13093SJuan Castillo	 * system is about to be switched off (through
213d5f13093SJuan Castillo	 * a SYSTEM_OFF psci call) to ask the TSP to
214d5f13093SJuan Castillo	 * perform any necessary bookkeeping.
215d5f13093SJuan Castillo	 * ---------------------------------------------
216d5f13093SJuan Castillo	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	/* x0 = smc-args pointer returned by tsp_system_off_main */
	restore_args_call_smc
endfunc tsp_system_off_entry
221d5f13093SJuan Castillo
222d5f13093SJuan Castillo	/*---------------------------------------------
223d5f13093SJuan Castillo	 * This entrypoint is used by the TSPD when the
224d5f13093SJuan Castillo	 * system is about to be reset (through a
225d5f13093SJuan Castillo	 * SYSTEM_RESET psci call) to ask the TSP to
226d5f13093SJuan Castillo	 * perform any necessary bookkeeping.
227d5f13093SJuan Castillo	 * ---------------------------------------------
228d5f13093SJuan Castillo	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	/* x0 = smc-args pointer returned by tsp_system_reset_main */
	restore_args_call_smc
endfunc tsp_system_reset_entry
233d5f13093SJuan Castillo
234d5f13093SJuan Castillo	/*---------------------------------------------
2357c88f3f6SAchin Gupta	 * This entrypoint is used by the TSPD when this
2367c88f3f6SAchin Gupta	 * cpu is turned on using a CPU_ON psci call to
2377c88f3f6SAchin Gupta	 * ask the TSP to initialise itself i.e. setup
2387c88f3f6SAchin Gupta	 * the mmu, stacks etc. Minimal architectural
2397c88f3f6SAchin Gupta	 * state will be initialised by the TSPD when
2407c88f3f6SAchin Gupta	 * this function is entered i.e. Caches and MMU
2417c88f3f6SAchin Gupta	 * will be turned off, the execution state
2427c88f3f6SAchin Gupta	 * will be aarch64 and exceptions masked.
2437c88f3f6SAchin Gupta	 * ---------------------------------------------
2447c88f3f6SAchin Gupta	 */
func tsp_cpu_on_entry
	/*
	 * Warm/secondary-CPU entry after a CPU_ON PSCI call. Unlike the
	 * cold-boot path, no image-wide cache invalidation or BSS
	 * zeroing is done here; the careful MMU-then-DCache enable
	 * ordering below is the essential part of this path.
	 */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines resi-
	 * -dent in other caches. We assume that
	 * interconnect coherency has been enabled for
	 * this cluster by EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
3237c88f3f6SAchin Gupta
3247c88f3f6SAchin Gupta	/*---------------------------------------------
3257c88f3f6SAchin Gupta	 * This entrypoint is used by the TSPD when this
3267c88f3f6SAchin Gupta	 * cpu is to be suspended through a CPU_SUSPEND
3277c88f3f6SAchin Gupta	 * psci call to ask the TSP to perform any
3287c88f3f6SAchin Gupta	 * bookeeping necessary. In the current
3297c88f3f6SAchin Gupta	 * implementation, the TSPD saves and restores
3307c88f3f6SAchin Gupta	 * the EL1 state.
3317c88f3f6SAchin Gupta	 * ---------------------------------------------
3327c88f3f6SAchin Gupta	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	/* x0 = smc-args pointer returned by tsp_cpu_suspend_main */
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
3377c88f3f6SAchin Gupta
33802446137SSoby Mathew	/*-------------------------------------------------
3396cf89021SAchin Gupta	 * This entrypoint is used by the TSPD to pass
34063b8440fSSoby Mathew	 * control for `synchronously` handling a S-EL1
34163b8440fSSoby Mathew	 * Interrupt which was triggered while executing
34263b8440fSSoby Mathew	 * in normal world. 'x0' contains a magic number
34363b8440fSSoby Mathew	 * which indicates this. TSPD expects control to
34463b8440fSSoby Mathew	 * be handed back at the end of interrupt
34563b8440fSSoby Mathew	 * processing. This is done through an SMC.
34663b8440fSSoby Mathew	 * The handover agreement is:
3476cf89021SAchin Gupta	 *
3486cf89021SAchin Gupta	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
3496cf89021SAchin Gupta	 *    the ELR_EL3 from the non-secure state.
3506cf89021SAchin Gupta	 * 2. TSP has to preserve the callee saved
3516cf89021SAchin Gupta	 *    general purpose registers, SP_EL1/EL0 and
3526cf89021SAchin Gupta	 *    LR.
3536cf89021SAchin Gupta	 * 3. TSP has to preserve the system and vfp
3546cf89021SAchin Gupta	 *    registers (if applicable).
3556cf89021SAchin Gupta	 * 4. TSP can use 'x0-x18' to enable its C
3566cf89021SAchin Gupta	 *    runtime.
3576cf89021SAchin Gupta	 * 5. TSP returns to TSPD using an SMC with
35802446137SSoby Mathew	 *    'x0' = TSP_HANDLED_S_EL1_INTR
35902446137SSoby Mathew	 * ------------------------------------------------
3606cf89021SAchin Gupta	 */
func	tsp_sel1_intr_entry
#if DEBUG
	/* Debug-only sanity check: x0 must carry the TSPD's magic id */
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous Non secure Interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * the cases switch to EL3 using SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled (x0 == 0) */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	/* Hand control back to the TSPD; x0 carries the agreed result id */
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry
4066cf89021SAchin Gupta
4076cf89021SAchin Gupta	/*---------------------------------------------
4087c88f3f6SAchin Gupta	 * This entrypoint is used by the TSPD when this
4097c88f3f6SAchin Gupta	 * cpu resumes execution after an earlier
4107c88f3f6SAchin Gupta	 * CPU_SUSPEND psci call to ask the TSP to
4117c88f3f6SAchin Gupta	 * restore its saved context. In the current
4127c88f3f6SAchin Gupta	 * implementation, the TSPD saves and restores
4137c88f3f6SAchin Gupta	 * EL1 state so nothing is done here apart from
4147c88f3f6SAchin Gupta	 * acknowledging the request.
4157c88f3f6SAchin Gupta	 * ---------------------------------------------
4167c88f3f6SAchin Gupta	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	/* x0 = smc-args pointer returned by tsp_cpu_resume_main */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry
4247c88f3f6SAchin Gupta
4257c88f3f6SAchin Gupta	/*---------------------------------------------
4267c88f3f6SAchin Gupta	 * This entrypoint is used by the TSPD to ask
4277c88f3f6SAchin Gupta	 * the TSP to service a fast smc request.
4287c88f3f6SAchin Gupta	 * ---------------------------------------------
4297c88f3f6SAchin Gupta	 */
func tsp_fast_smc_entry
	/* Fast SMCs run to completion with interrupts left masked */
	bl	tsp_smc_handler
	/* x0 = smc-args pointer returned by tsp_smc_handler */
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
4377c88f3f6SAchin Gupta
438239b04faSSoby Mathew	/*---------------------------------------------
439239b04faSSoby Mathew	 * This entrypoint is used by the TSPD to ask
440239b04faSSoby Mathew	 * the TSP to service a std smc request.
441239b04faSSoby Mathew	 * We will enable preemption during execution
442239b04faSSoby Mathew	 * of tsp_smc_handler.
443239b04faSSoby Mathew	 * ---------------------------------------------
444239b04faSSoby Mathew	 */
func tsp_std_smc_entry
	/* Unmask IRQ/FIQ so the standard SMC can be pre-empted ... */
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	/* ... and re-mask them before returning to EL3 */
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_std_smc_entry
4543df6012aSDouglas Raillard
4553df6012aSDouglas Raillard	/*---------------------------------------------------------------------
4563df6012aSDouglas Raillard	 * This entrypoint is used by the TSPD to abort a pre-empted Standard
4573df6012aSDouglas Raillard	 * SMC. It could be on behalf of non-secure world or because a CPU
4583df6012aSDouglas Raillard	 * suspend/CPU off request needs to abort the preempted SMC.
4593df6012aSDouglas Raillard	 * --------------------------------------------------------------------
4603df6012aSDouglas Raillard	 */
func tsp_abort_std_smc_entry
	/*
	 * Abort hook for a pre-empted Standard SMC: discard the
	 * pre-empted C context by resetting the stack, give the C
	 * handler a chance to clean up (e.g. release locks), then
	 * return to EL3 with the args supplied by the handler.
	 */

	/*
	 * Exceptions masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the pre-empted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	/* x0 = smc-args pointer returned by tsp_abort_smc_handler */
	restore_args_call_smc

	/*
	 * Should never reach here. Use no_ret (as every other entrypoint
	 * in this file does) instead of a plain bl, so the call is
	 * explicitly marked as not returning.
	 */
	no_ret	plat_panic_handler
endfunc tsp_abort_std_smc_entry
481