/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>
#include <tsp.h>

        .globl  tsp_entrypoint
        .globl  tsp_vector_table

        /* ---------------------------------------------
         * Populate the params in x0-x7 from the pointer
         * to the smc args structure in x0.
         * ---------------------------------------------
         */
        .macro restore_args_call_smc
        ldp     x6, x7, [x0, #TSP_ARG6]
        ldp     x4, x5, [x0, #TSP_ARG4]
        ldp     x2, x3, [x0, #TSP_ARG2]
        ldp     x0, x1, [x0, #TSP_ARG0]
        smc     #0
        .endm

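        /* ---------------------------------------------
         * Helper macros for tsp_fiq_entry below: save
         * and restore the EL1 exception return state
         * (ELR_EL1, SPSR_EL1) together with x30 and x18,
         * so that a pending exception return from S-EL1
         * is not lost while the C handlers run.
         * ---------------------------------------------
         */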
        .macro save_eret_context reg1 reg2
        mrs     \reg1, elr_el1
        mrs     \reg2, spsr_el1
        stp     \reg1, \reg2, [sp, #-0x10]!
        stp     x30, x18, [sp, #-0x10]!
        .endm

        .macro restore_eret_context reg1 reg2
        ldp     x30, x18, [sp], #0x10
        ldp     \reg1, \reg2, [sp], #0x10
        msr     elr_el1, \reg1
        msr     spsr_el1, \reg2
        .endm

        .section .text, "ax"
        .align 3

func tsp_entrypoint

        /* ---------------------------------------------
         * The entrypoint is expected to be executed
         * only by the primary cpu (at least for now).
         * So, make sure no secondary has lost its way.
         * ---------------------------------------------
         */
        mrs     x0, mpidr_el1
        bl      platform_is_primary_cpu
        cbz     x0, tsp_entrypoint_panic

        /* ---------------------------------------------
         * Set the exception vector to something sane.
         * ---------------------------------------------
         */
        adr     x0, tsp_exceptions
        msr     vbar_el1, x0

        /* ---------------------------------------------
         * Enable the instruction cache.
         * ---------------------------------------------
         */
        mrs     x0, sctlr_el1
        orr     x0, x0, #SCTLR_I_BIT
        msr     sctlr_el1, x0
        isb

        /* ---------------------------------------------
         * Zero out NOBITS sections. There are 2 of them:
         *   - the .bss section;
         *   - the coherent memory section.
         * ---------------------------------------------
         */
        ldr     x0, =__BSS_START__
        ldr     x1, =__BSS_SIZE__
        bl      zeromem16

        ldr     x0, =__COHERENT_RAM_START__
        ldr     x1, =__COHERENT_RAM_UNALIGNED_SIZE__
        bl      zeromem16

        /* --------------------------------------------
         * Give ourselves a small coherent stack to
         * ease the pain of initializing the MMU
         * --------------------------------------------
         */
        mrs     x0, mpidr_el1
        bl      platform_set_coherent_stack

        /* ---------------------------------------------
         * Perform early platform setup & platform
         * specific early arch. setup e.g. mmu setup
         * ---------------------------------------------
         */
        bl      bl32_early_platform_setup
        bl      bl32_plat_arch_setup

        /* ---------------------------------------------
         * Give ourselves a stack allocated in Normal
         * -IS-WBWA memory
         * ---------------------------------------------
         */
        mrs     x0, mpidr_el1
        bl      platform_set_stack

        /* ---------------------------------------------
         * Jump to main function.
         * ---------------------------------------------
         */
        bl      tsp_main

        /* ---------------------------------------------
         * Tell TSPD that we are done initialising
         * ---------------------------------------------
         */
        mov     x1, x0
        mov     x0, #TSP_ENTRY_DONE
        smc     #0

tsp_entrypoint_panic:
        b       tsp_entrypoint_panic

        /* -------------------------------------------
         * Table of entrypoint vectors provided to the
         * TSPD for the various entrypoints
         * -------------------------------------------
         */
func tsp_vector_table
        b       tsp_std_smc_entry
        b       tsp_fast_smc_entry
        b       tsp_cpu_on_entry
        b       tsp_cpu_off_entry
        b       tsp_cpu_resume_entry
        b       tsp_cpu_suspend_entry
        b       tsp_fiq_entry
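
        /* ---------------------------------------------
         * Note: each entry above is a single branch
         * instruction and the TSPD transfers control to
         * a fixed offset within this table for the
         * corresponding service, so the order and number
         * of the entries form part of the interface with
         * the TSPD and must be kept in sync with it.
         * ---------------------------------------------
         */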

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu is to be turned off through a CPU_OFF
         * psci call to ask the TSP to perform any
         * bookkeeping necessary. In the current
         * implementation, the TSPD expects the TSP to
         * re-initialise its state so nothing is done
         * here except for acknowledging the request.
         * ---------------------------------------------
         */
func tsp_cpu_off_entry
        bl      tsp_cpu_off_main
        restore_args_call_smc

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu is turned on using a CPU_ON psci call to
         * ask the TSP to initialise itself i.e. set up
         * the mmu, stacks etc. Minimal architectural
         * state will be initialised by the TSPD when
         * this function is entered i.e. caches and the
         * MMU will be turned off, the execution state
         * will be aarch64 and exceptions will be masked.
         * ---------------------------------------------
         */
func tsp_cpu_on_entry
        /* ---------------------------------------------
         * Set the exception vector to something sane.
         * ---------------------------------------------
         */
        adr     x0, tsp_exceptions
        msr     vbar_el1, x0

        /* ---------------------------------------------
         * Enable the instruction cache.
         * ---------------------------------------------
         */
        mrs     x0, sctlr_el1
        orr     x0, x0, #SCTLR_I_BIT
        msr     sctlr_el1, x0
        isb

        /* --------------------------------------------
         * Give ourselves a small coherent stack to
         * ease the pain of initializing the MMU
         * --------------------------------------------
         */
        mrs     x0, mpidr_el1
        bl      platform_set_coherent_stack

        /* ---------------------------------------------
         * Initialise the MMU
         * ---------------------------------------------
         */
        bl      enable_mmu_el1

        /* ---------------------------------------------
         * Give ourselves a stack allocated in Normal
         * -IS-WBWA memory
         * ---------------------------------------------
         */
        mrs     x0, mpidr_el1
        bl      platform_set_stack

        /* ---------------------------------------------
         * Enter C runtime to perform any remaining
         * bookkeeping
         * ---------------------------------------------
         */
        bl      tsp_cpu_on_main
        restore_args_call_smc

        /* Should never reach here */
tsp_cpu_on_entry_panic:
        b       tsp_cpu_on_entry_panic

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu is to be suspended through a CPU_SUSPEND
         * psci call to ask the TSP to perform any
         * bookkeeping necessary. In the current
         * implementation, the TSPD saves and restores
         * the EL1 state.
         * ---------------------------------------------
         */
func tsp_cpu_suspend_entry
        bl      tsp_cpu_suspend_main
        restore_args_call_smc

        /*---------------------------------------------
         * This entrypoint is used by the TSPD to pass
         * control for handling a pending S-EL1 FIQ.
         * 'x0' contains a magic number which indicates
         * this. TSPD expects control to be handed back
         * at the end of FIQ processing. This is done
         * through an SMC. The handover agreement is:
         *
         * 1. The PSTATE.DAIF bits are set upon entry.
         *    'x1' has the ELR_EL3 from the non-secure
         *    state.
         * 2. TSP has to preserve the callee saved
         *    general purpose registers, SP_EL1/EL0 and
         *    LR.
         * 3. TSP has to preserve the system and vfp
         *    registers (if applicable).
         * 4. TSP can use 'x0-x18' to enable its C
         *    runtime.
         * 5. TSP returns to TSPD using an SMC with
         *    'x0' = TSP_HANDLED_S_EL1_FIQ
         * ---------------------------------------------
         */
func tsp_fiq_entry
#if DEBUG
        mov     x2, #(TSP_HANDLE_FIQ_AND_RETURN & ~0xffff)
        movk    x2, #(TSP_HANDLE_FIQ_AND_RETURN & 0xffff)
        cmp     x0, x2
        b.ne    tsp_fiq_entry_panic
#endif
        /*---------------------------------------------
         * Save any previous context needed to perform
         * an exception return from S-EL1 e.g. context
         * from a previous IRQ. Update statistics and
         * handle the FIQ before returning to the TSPD.
         * IRQ/FIQs are not enabled since that will
         * complicate the implementation. Execution
         * will be transferred back to the normal world
         * in any case. A non-zero return value from the
         * fiq handler is an error.
         * ---------------------------------------------
         */
        save_eret_context x2 x3
        bl      tsp_update_sync_fiq_stats
        bl      tsp_fiq_handler
        cbnz    x0, tsp_fiq_entry_panic
        restore_eret_context x2 x3
        mov     x0, #(TSP_HANDLED_S_EL1_FIQ & ~0xffff)
        movk    x0, #(TSP_HANDLED_S_EL1_FIQ & 0xffff)
        smc     #0

tsp_fiq_entry_panic:
        b       tsp_fiq_entry_panic

        /*---------------------------------------------
         * This entrypoint is used by the TSPD when this
         * cpu resumes execution after an earlier
         * CPU_SUSPEND psci call to ask the TSP to
         * restore its saved context. In the current
         * implementation, the TSPD saves and restores
         * EL1 state so nothing is done here apart from
         * acknowledging the request.
         * ---------------------------------------------
         */
func tsp_cpu_resume_entry
        bl      tsp_cpu_resume_main
        restore_args_call_smc
tsp_cpu_resume_panic:
        b       tsp_cpu_resume_panic

        /*---------------------------------------------
         * This entrypoint is used by the TSPD to ask
         * the TSP to service a fast smc request.
         * ---------------------------------------------
         */
func tsp_fast_smc_entry
        bl      tsp_smc_handler
        restore_args_call_smc
tsp_fast_smc_entry_panic:
        b       tsp_fast_smc_entry_panic

        /*---------------------------------------------
         * This entrypoint is used by the TSPD to ask
         * the TSP to service a std smc request.
         * We will enable preemption during execution
         * of tsp_smc_handler.
         * ---------------------------------------------
         */
func tsp_std_smc_entry
        msr     daifclr, #DAIF_FIQ_BIT | DAIF_IRQ_BIT   /* Enable preemption: unmask FIQ and IRQ */
        bl      tsp_smc_handler
        msr     daifset, #DAIF_FIQ_BIT | DAIF_IRQ_BIT   /* Mask FIQ and IRQ again before returning */
        restore_args_call_smc
tsp_std_smc_entry_panic:
        b       tsp_std_smc_entry_panic