/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <asm_macros.S>
#include <bl32/tsp/tsp.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <smccc_helpers.h>

#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table
#if SPMC_AT_EL3
	.globl	tsp_cpu_on_entry
#endif


	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #SMC_ARG6]
	ldp	x4, x5, [x0, #SMC_ARG4]
	ldp	x2, x3, [x0, #SMC_ARG2]
	ldp	x0, x1, [x0, #SMC_ARG0]
	smc	#0
	.endm
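
	/* ---------------------------------------------
	 * The following pair of macros preserves and
	 * restores the EL1 exception return state
	 * (ELR_EL1, SPSR_EL1) together with x30 and x18
	 * on the current stack. They are used around
	 * S-EL1 interrupt handling so that any pending
	 * exception return context is not clobbered.
	 * ---------------------------------------------
	 */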
	.macro	save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

func tsp_entrypoint _align=3
	/*---------------------------------------------
	 * Save arguments x0 - x3 from BL1 for future
	 * use.
	 * ---------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

#if ENABLE_PIE
	/*
	 * ------------------------------------------------------------
	 * If PIE is enabled, fix up the Global Descriptor Table. This
	 * is done only once, during the primary core's cold boot path.
	 *
	 * The compile-time base address required for the fixup is
	 * calculated using the "pie_fixup" label, which lies within
	 * the first page of the image.
	 * ------------------------------------------------------------
	 */
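	/*
	 * fixup_gdt_reloc expects the image's runtime start address in
	 * x0 and its end address in x1. The start is the page-aligned
	 * address of "pie_fixup" and the end is derived from the
	 * link-time image size (BL32_LIMIT - BL32_BASE), so that
	 * relocations anywhere within the image are covered.
	 */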
pie_fixup:
	ldr	x0, =pie_fixup
	and	x0, x0, #~(PAGE_SIZE_MASK)
	mov_imm	x1, (BL32_LIMIT - BL32_BASE)
	add	x1, x1, x0
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been set up.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks and disable
	 * speculative loads.
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
#if ENABLE_BTI
	/* Enable PAC branch type compatibility */
	bic	x0, x0, #(SCTLR_BT0_BIT | SCTLR_BT1_BIT)
#endif
	bic	x0, x0, #SCTLR_DSSBS_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Invalidate the RW memory used by the BL32
	 * image. This includes the data and NOBITS
	 * sections. This is done to safeguard against
	 * possible corruption of this memory by dirty
	 * cache lines in a system cache as a result of
	 * use by an earlier boot loader stage. If PIE
	 * is enabled, however, RO sections including
	 * the GOT may be modified during the PIE fixup.
	 * Therefore, to be on the safe side, invalidate
	 * the entire image region if PIE is enabled.
	 * ---------------------------------------------
	 */
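	/*
	 * Both inv_dcache_range and zeromem below take the region base
	 * in x0 and its size in x1, hence the end addresses are
	 * converted to sizes before each call.
	 */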
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
	adrp	x0, __TEXT_START__
	add	x0, x0, :lo12:__TEXT_START__
#else
	adrp	x0, __RO_START__
	add	x0, x0, :lo12:__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
	adrp	x0, __RW_START__
	add	x0, x0, :lo12:__RW_START__
#endif /* ENABLE_PIE */
	adrp	x1, __RW_END__
	add	x1, x1, :lo12:__RW_END__
	sub	x1, x1, x0
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 *   - the .bss section;
	 *   - the coherent memory section.
	 * ---------------------------------------------
	 */
	adrp	x0, __BSS_START__
	add	x0, x0, :lo12:__BSS_START__
	adrp	x1, __BSS_END__
	add	x1, x1, :lo12:__BSS_END__
	sub	x1, x1, x0
	bl	zeromem

#if USE_COHERENT_MEM
	adrp	x0, __COHERENT_RAM_START__
	add	x0, x0, :lo12:__COHERENT_RAM_START__
	adrp	x1, __COHERENT_RAM_END_UNALIGNED__
	add	x1, x1, :lo12:__COHERENT_RAM_END_UNALIGNED__
	sub	x1, x1, x0
	bl	zeromem
#endif

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* ---------------------------------------------
	 * Initialize the stack protector canary before
	 * any C code is called.
	 * ---------------------------------------------
	 */
#if STACK_PROTECTOR_ENABLED
	bl	update_stack_protector_canary
#endif

	/*---------------------------------------------
	 * Restore arguments x0 - x3 saved from the
	 * prior stage.
	 * ---------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23

	/* ---------------------------------------------
	 * Perform TSP setup
	 * ---------------------------------------------
	 */
	bl	tsp_setup

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic
endfunc tsp_entrypoint


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints
	 * -------------------------------------------
	 */
vector_base tsp_vector_table
	b	tsp_yield_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_sel1_intr_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry
	b	tsp_abort_yield_smc_entry
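
	/* -------------------------------------------
	 * Each entry above is a single branch
	 * instruction; the TSPD jumps directly to the
	 * slot it needs, so the order here must match
	 * the entry vector structure shared with the
	 * TSPD (see tsp.h).
	 * -------------------------------------------
	 */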

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc
endfunc tsp_cpu_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc
endfunc tsp_system_off_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc
endfunc tsp_system_reset_entry
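
	/*---------------------------------------------
	 * Note: each C handler invoked from these
	 * entrypoints returns, in x0, a pointer to the
	 * structure holding the SMC arguments to hand
	 * back. restore_args_call_smc unpacks it into
	 * x0 - x7 and returns to the TSPD via an SMC.
	 * ---------------------------------------------
	 */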

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. set up
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	bl	plat_set_my_stack

	/* --------------------------------------------
	 * Enable MMU and D-caches together.
	 * --------------------------------------------
	 */
	mov	x0, #0
	bl	bl32_plat_enable_mmu

#if ENABLE_PAUTH
	/* ---------------------------------------------
	 * Program APIAKey_EL1
	 * and enable pointer authentication
	 * ---------------------------------------------
	 */
	bl	pauth_init_enable_el1
#endif /* ENABLE_PAUTH */

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * bookkeeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic
endfunc tsp_cpu_on_entry
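
	/*---------------------------------------------
	 * Note: unlike the cold boot path above, the
	 * warm boot path does not invalidate or zero
	 * any image memory; the primary cpu has
	 * already initialised the image. Only per-cpu
	 * state (vectors, SCTLR, stack, MMU and,
	 * optionally, pointer authentication) is set
	 * up before entering the C runtime.
	 * ---------------------------------------------
	 */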

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc
endfunc tsp_cpu_suspend_entry
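
	/*-------------------------------------------------
	 * Note: S-EL1 interrupts that fire while the TSP
	 * itself is executing are taken through its own
	 * vectors (tsp_exceptions). The entrypoint below
	 * is only used when the interrupt was triggered
	 * while the normal world was running and EL3
	 * hands it to the TSP synchronously via the TSPD.
	 * ------------------------------------------------
	 */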

	/*-------------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for `synchronously` handling an S-EL1
	 * interrupt which was triggered while executing
	 * in the normal world. 'x0' contains a magic
	 * number which indicates this. TSPD expects
	 * control to be handed back at the end of
	 * interrupt processing. This is done through an
	 * SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_INTR
	 * ------------------------------------------------
	 */
func tsp_sel1_intr_entry
#if DEBUG
	mov_imm	x2, TSP_HANDLE_SEL1_INTR_AND_RETURN
	cmp	x0, x2
	b.ne	tsp_sel1_int_entry_panic
#endif
	/*-------------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous non-secure interrupt.
	 * Update statistics and handle the S-EL1
	 * interrupt before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. The handler can return 0
	 * if the interrupt was handled or TSP_PREEMPTED
	 * if the expected interrupt was preempted
	 * by an interrupt that should be handled in EL3
	 * e.g. Group 0 interrupt in GICv3. In both
	 * cases, switch to EL3 using an SMC with id
	 * TSP_HANDLED_S_EL1_INTR. Any other return value
	 * from the handler will result in panic.
	 * ------------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_sel1_intr_stats
	bl	tsp_common_int_handler
	/* Check if the S-EL1 interrupt has been handled */
	cbnz	x0, tsp_sel1_intr_check_preemption
	b	tsp_sel1_intr_return
tsp_sel1_intr_check_preemption:
	/* Check if the S-EL1 interrupt has been preempted */
	mov_imm	x1, TSP_PREEMPTED
	cmp	x0, x1
	b.ne	tsp_sel1_int_entry_panic
tsp_sel1_intr_return:
	mov_imm	x0, TSP_HANDLED_S_EL1_INTR
	restore_eret_context x2 x3
	smc	#0

	/* Should never reach here */
tsp_sel1_int_entry_panic:
	no_ret	plat_panic_handler
endfunc tsp_sel1_intr_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_cpu_resume_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_fast_smc_entry
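
	/*---------------------------------------------
	 * Note: fast SMCs are serviced with IRQ/FIQ
	 * masked and so cannot be preempted. The
	 * yielding path below unmasks them around
	 * tsp_smc_handler, allowing a non-secure
	 * interrupt to preempt the request; a preempted
	 * request is later resumed or aborted via
	 * tsp_abort_yield_smc_entry.
	 * ---------------------------------------------
	 */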

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a Yielding SMC request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_yield_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc

	/* Should never reach here */
	no_ret	plat_panic_handler
endfunc tsp_yield_smc_entry

	/*---------------------------------------------------------------------
	 * This entrypoint is used by the TSPD to abort a pre-empted Yielding
	 * SMC. It could be on behalf of non-secure world or because a CPU
	 * suspend/CPU off request needs to abort the preempted SMC.
	 * --------------------------------------------------------------------
	 */
func tsp_abort_yield_smc_entry

	/*
	 * Exceptions masking is already done by the TSPD when entering this
	 * hook so there is no need to do it here.
	 */

	/* Reset the stack used by the pre-empted SMC */
	bl	plat_set_my_stack

	/*
	 * Allow some cleanup such as releasing locks.
	 */
	bl	tsp_abort_smc_handler

	restore_args_call_smc

	/* Should never reach here */
	bl	plat_panic_handler
endfunc tsp_abort_yield_smc_entry