/*
 * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table


	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]
	smc	#0
	.endm

	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm
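
	/* ----------------------------------------------------------------
	 * For reference, restore_args_call_smc above unpacks a per-cpu
	 * block of saved general purpose registers laid out as eight
	 * contiguous 64-bit slots, addressed through the TSP_ARGn byte
	 * offsets (TSP_ARG0 holds x0, ..., TSP_ARG7 holds x7). A minimal
	 * C sketch of that layout is shown below; the type name is an
	 * illustrative stand-in, the authoritative definition lives in
	 * tsp_private.h (included above).
	 *
	 *   #include <stdint.h>
	 *
	 *   // Hypothetical stand-in for the real args structure.
	 *   typedef struct smc_args_sketch {
	 *           uint64_t regs[8];  // regs[n] holds xn, at byte offset TSP_ARGn
	 *   } smc_args_sketch_t;
	 * ----------------------------------------------------------------
	 */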

func tsp_entrypoint _align=3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled, fix up the Global Descriptor Table only
	 * once, during the primary core's cold boot path.
	 *
	 * The compile-time base address required for the fixup is
	 * calculated using the "pie_fixup" label present within the
	 * first page.
	 * ------------------------------------------------------------
	 */
pie_fixup:
	ldr	x0, =pie_fixup
	and	x0, x0, #~(PAGE_SIZE_MASK)
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)
	add	x1, x1, x0
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage.
	 * ---------------------------------------------
	 */
	adr	x0, __RW_START__
	adr	x1, __RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
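
	/* ----------------------------------------------------------------
	 * Each slot in tsp_vector_table above is a single branch
	 * instruction, and the TSPD is expected to index into the table
	 * by field offset rather than by symbol. The order of the entries
	 * must therefore stay in lockstep with the vector structure shared
	 * with the TSPD via <bl32/tsp/tsp.h> (included above). A C sketch
	 * of that expected layout is given below; the names are
	 * illustrative and mirror the branch targets in this table.
	 *
	 *   #include <stdint.h>
	 *
	 *   typedef uint32_t entry_isn_t;  // one AArch64 instruction per slot
	 *
	 *   typedef struct tsp_vectors_sketch {
	 *           entry_isn_t yield_smc_entry;
	 *           entry_isn_t fast_smc_entry;
	 *           entry_isn_t cpu_on_entry;
	 *           entry_isn_t cpu_off_entry;
	 *           entry_isn_t cpu_resume_entry;
	 *           entry_isn_t cpu_suspend_entry;
	 *           entry_isn_t sel1_intr_entry;
	 *           entry_isn_t system_off_entry;
	 *           entry_isn_t system_reset_entry;
	 *           entry_isn_t abort_yield_smc_entry;
	 *   } tsp_vectors_sketch_t;
	 * ----------------------------------------------------------------
	 */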

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself, i.e. set up
	 * the MMU, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered, i.e. caches and the
	 * MMU will be turned off, the execution state
	 * will be AArch64 and exceptions will be masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. The TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF bits are set upon entry. 'x1'
	 *    has the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee-saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1, e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQs/FIQs are not enabled since that would
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled, or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3,
	 * e.g. a Group 0 interrupt in GICv3. In both
	 * cases switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in a panic.
	 * ------------------------------------------------
	 */
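	/* ----------------------------------------------------------------
	 * The return-value handling below is, in essence, the following
	 * C sketch. The function and parameter names are hypothetical;
	 * TSP_PREEMPTED and TSP_HANDLED_S_EL1_INTR are assumed to come
	 * from <bl32/tsp/tsp.h>, which this file already includes.
	 *
	 *   #include <stdbool.h>
	 *   #include <stdint.h>
	 *   #include <bl32/tsp/tsp.h>
	 *
	 *   // true  -> return to the TSPD with x0 = TSP_HANDLED_S_EL1_INTR
	 *   // false -> unexpected return value, panic
	 *   static inline bool sel1_intr_handled(int32_t handler_ret)
	 *   {
	 *           return (handler_ret == 0) || (handler_ret == TSP_PREEMPTED);
	 *   }
	 * ----------------------------------------------------------------
	 */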
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast SMC request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a preempted Yielding
	 * SMC. It could be on behalf of the non-secure world, or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exception masking is already done by the TSPD when entering this
	 * hook, so there is no need to do it here.
	 */

	/* Reset the stack used by the preempted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry