/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Test Secure Payload (TSP) S-EL1 entrypoints.
 *
 * This file provides the cold/warm boot entrypoint of the TSP and the table
 * of vectors through which the Test Secure Payload Dispatcher (TSPD),
 * running at EL3, hands control to the TSP for SMC servicing and PSCI
 * power-management events. Control is always returned to the TSPD via an
 * SMC (see the restore_args_call_smc macro).
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>
#include <xlat_tables.h>
#include "../tsp_private.h"


	.globl	tsp_entrypoint
	.globl	tsp_vector_table



	/* ---------------------------------------------
	 * Populate the params in x0-x7 from the pointer
	 * to the smc args structure in x0, then return
	 * to the TSPD through an SMC.
	 * ---------------------------------------------
	 */
	.macro restore_args_call_smc
	ldp	x6, x7, [x0, #TSP_ARG6]
	ldp	x4, x5, [x0, #TSP_ARG4]
	ldp	x2, x3, [x0, #TSP_ARG2]
	ldp	x0, x1, [x0, #TSP_ARG0]	/* loaded last: x0 doubles as base ptr */
	smc	#0
	.endm

	/* ---------------------------------------------
	 * Preserve ELR_EL1/SPSR_EL1 and x30/x18 on the
	 * stack so a pending S-EL1 exception return is
	 * not lost while the FIQ is handled in C.
	 * ---------------------------------------------
	 */
	.macro save_eret_context reg1 reg2
	mrs	\reg1, elr_el1
	mrs	\reg2, spsr_el1
	stp	\reg1, \reg2, [sp, #-0x10]!
	stp	x30, x18, [sp, #-0x10]!
	.endm

	/* ---------------------------------------------
	 * Inverse of save_eret_context: pop x30/x18 and
	 * reinstate ELR_EL1/SPSR_EL1.
	 * ---------------------------------------------
	 */
	.macro restore_eret_context reg1 reg2
	ldp	x30, x18, [sp], #0x10
	ldp	\reg1, \reg2, [sp], #0x10
	msr	elr_el1, \reg1
	msr	spsr_el1, \reg2
	.endm

	.section	.text, "ax"
	.align 3

	/* ---------------------------------------------
	 * Cold boot entrypoint of the TSP, entered once
	 * on the primary cpu with caches/MMU off.
	 * ---------------------------------------------
	 */
func tsp_entrypoint

	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* ---------------------------------------------
	 * Enable the SError interrupt now that the
	 * exception vectors have been setup.
	 * ---------------------------------------------
	 */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Zero out NOBITS sections. There are 2 of them:
	 * - the .bss section;
	 * - the coherent memory section.
	 * ---------------------------------------------
	 */
	ldr	x0, =__BSS_START__
	ldr	x1, =__BSS_SIZE__
	bl	zeromem16

	ldr	x0, =__COHERENT_RAM_START__
	ldr	x1, =__COHERENT_RAM_UNALIGNED_SIZE__
	bl	zeromem16

	/* --------------------------------------------
	 * Allocate a stack whose memory will be marked
	 * as Normal-IS-WBWA when the MMU is enabled.
	 * There is no risk of reading stale stack
	 * memory after enabling the MMU as only the
	 * primary cpu is running at the moment.
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	/* ---------------------------------------------
	 * Perform early platform setup & platform
	 * specific early arch. setup e.g. mmu setup
	 * ---------------------------------------------
	 */
	bl	tsp_early_platform_setup
	bl	tsp_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	tsp_main

	/* ---------------------------------------------
	 * Tell TSPD that we are done initialising.
	 * tsp_main's return value (smc args ptr) moves
	 * to x1; x0 carries the function id.
	 * ---------------------------------------------
	 */
	mov	x1, x0
	mov	x0, #TSP_ENTRY_DONE
	smc	#0

tsp_entrypoint_panic:
	b	tsp_entrypoint_panic


	/* -------------------------------------------
	 * Table of entrypoint vectors provided to the
	 * TSPD for the various entrypoints.
	 * NOTE: the slot order is the TSPD's ABI
	 * (tsp_vectors_t) - do not reorder.
	 * -------------------------------------------
	 */
func tsp_vector_table
	b	tsp_std_smc_entry
	b	tsp_fast_smc_entry
	b	tsp_cpu_on_entry
	b	tsp_cpu_off_entry
	b	tsp_cpu_resume_entry
	b	tsp_cpu_suspend_entry
	b	tsp_fiq_entry
	b	tsp_system_off_entry
	b	tsp_system_reset_entry

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be turned off through a CPU_OFF
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD expects the TSP to
	 * re-initialise its state so nothing is done
	 * here except for acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_off_entry
	bl	tsp_cpu_off_main
	restore_args_call_smc

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be switched off (through
	 * a SYSTEM_OFF psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_off_entry
	bl	tsp_system_off_main
	restore_args_call_smc

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when the
	 * system is about to be reset (through a
	 * SYSTEM_RESET psci call) to ask the TSP to
	 * perform any necessary bookkeeping.
	 * ---------------------------------------------
	 */
func tsp_system_reset_entry
	bl	tsp_system_reset_main
	restore_args_call_smc

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is turned on using a CPU_ON psci call to
	 * ask the TSP to initialise itself i.e. setup
	 * the mmu, stacks etc. Minimal architectural
	 * state will be initialised by the TSPD when
	 * this function is entered i.e. Caches and MMU
	 * will be turned off, the execution state
	 * will be aarch64 and exceptions masked.
	 * ---------------------------------------------
	 */
func tsp_cpu_on_entry
	/* ---------------------------------------------
	 * Set the exception vector to something sane.
	 * ---------------------------------------------
	 */
	adr	x0, tsp_exceptions
	msr	vbar_el1, x0
	isb

	/* Enable the SError interrupt */
	msr	daifclr, #DAIF_ABT_BIT

	/* ---------------------------------------------
	 * Enable the instruction cache, stack pointer
	 * and data access alignment checks
	 * ---------------------------------------------
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0
	isb

	/* --------------------------------------------
	 * Give ourselves a stack whose memory will be
	 * marked as Normal-IS-WBWA when the MMU is
	 * enabled.
	 * --------------------------------------------
	 */
	mrs	x0, mpidr_el1
	bl	platform_set_stack

	/* --------------------------------------------
	 * Enable the MMU with the DCache disabled. It
	 * is safe to use stacks allocated in normal
	 * memory as a result. All memory accesses are
	 * marked nGnRnE when the MMU is disabled. So
	 * all the stack writes will make it to memory.
	 * All memory accesses are marked Non-cacheable
	 * when the MMU is enabled but D$ is disabled.
	 * So used stack memory is guaranteed to be
	 * visible immediately after the MMU is enabled
	 * Enabling the DCache at the same time as the
	 * MMU can lead to speculatively fetched and
	 * possibly stale stack memory being read from
	 * other caches. This can lead to coherency
	 * issues.
	 * --------------------------------------------
	 */
	mov	x0, #DISABLE_DCACHE
	bl	bl32_plat_enable_mmu

	/* ---------------------------------------------
	 * Enable the Data cache now that the MMU has
	 * been enabled. The stack has been unwound. It
	 * will be written first before being read. This
	 * will invalidate any stale cache lines resi-
	 * -dent in other caches. We assume that
	 * interconnect coherency has been enabled for
	 * this cluster by EL3 firmware.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el1
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el1, x0
	isb

	/* ---------------------------------------------
	 * Enter C runtime to perform any remaining
	 * book keeping
	 * ---------------------------------------------
	 */
	bl	tsp_cpu_on_main
	restore_args_call_smc

	/* Should never reach here */
tsp_cpu_on_entry_panic:
	b	tsp_cpu_on_entry_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu is to be suspended through a CPU_SUSPEND
	 * psci call to ask the TSP to perform any
	 * bookkeeping necessary. In the current
	 * implementation, the TSPD saves and restores
	 * the EL1 state.
	 * ---------------------------------------------
	 */
func tsp_cpu_suspend_entry
	bl	tsp_cpu_suspend_main
	restore_args_call_smc

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to pass
	 * control for handling a pending S-EL1 FIQ.
	 * 'x0' contains a magic number which indicates
	 * this. TSPD expects control to be handed back
	 * at the end of FIQ processing. This is done
	 * through an SMC. The handover agreement is:
	 *
	 * 1. PSTATE.DAIF are set upon entry. 'x1' has
	 *    the ELR_EL3 from the non-secure state.
	 * 2. TSP has to preserve the callee saved
	 *    general purpose registers, SP_EL1/EL0 and
	 *    LR.
	 * 3. TSP has to preserve the system and vfp
	 *    registers (if applicable).
	 * 4. TSP can use 'x0-x18' to enable its C
	 *    runtime.
	 * 5. TSP returns to TSPD using an SMC with
	 *    'x0' = TSP_HANDLED_S_EL1_FIQ
	 * ---------------------------------------------
	 */
func tsp_fiq_entry
#if DEBUG
	/* Sanity-check the magic number passed by the TSPD */
	mov	x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
	movk	x2, #(TSP_HANDLE_FIQ_AND_RETURN & 0xffff)
	cmp	x0, x2
	b.ne	tsp_fiq_entry_panic
#endif
	/*---------------------------------------------
	 * Save any previous context needed to perform
	 * an exception return from S-EL1 e.g. context
	 * from a previous IRQ. Update statistics and
	 * handle the FIQ before returning to the TSPD.
	 * IRQ/FIQs are not enabled since that will
	 * complicate the implementation. Execution
	 * will be transferred back to the normal world
	 * in any case. A non-zero return value from the
	 * fiq handler is an error.
	 * ---------------------------------------------
	 */
	save_eret_context x2 x3
	bl	tsp_update_sync_fiq_stats
	bl	tsp_fiq_handler
	cbnz	x0, tsp_fiq_entry_panic
	restore_eret_context x2 x3
	/* Build the 32-bit response id in two 16-bit halves */
	mov	x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
	movk	x0, #(TSP_HANDLED_S_EL1_FIQ & 0xffff)
	smc	#0

tsp_fiq_entry_panic:
	b	tsp_fiq_entry_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD when this
	 * cpu resumes execution after an earlier
	 * CPU_SUSPEND psci call to ask the TSP to
	 * restore its saved context. In the current
	 * implementation, the TSPD saves and restores
	 * EL1 state so nothing is done here apart from
	 * acknowledging the request.
	 * ---------------------------------------------
	 */
func tsp_cpu_resume_entry
	bl	tsp_cpu_resume_main
	restore_args_call_smc
tsp_cpu_resume_panic:
	b	tsp_cpu_resume_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a fast smc request.
	 * ---------------------------------------------
	 */
func tsp_fast_smc_entry
	bl	tsp_smc_handler
	restore_args_call_smc
tsp_fast_smc_entry_panic:
	b	tsp_fast_smc_entry_panic

	/*---------------------------------------------
	 * This entrypoint is used by the TSPD to ask
	 * the TSP to service a std smc request.
	 * We will enable preemption during execution
	 * of tsp_smc_handler.
	 * ---------------------------------------------
	 */
func tsp_std_smc_entry
	msr	daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	bl	tsp_smc_handler
	msr	daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT
	restore_args_call_smc
tsp_std_smc_entry_panic:
	b	tsp_std_smc_entry_panic