/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __TSPD_PRIVATE_H__
#define __TSPD_PRIVATE_H__

#include <arch.h>
#include <context.h>
#include <interrupt_mgmt.h>
#include <platform_def.h>
#include <psci.h>

/*******************************************************************************
 * Secure Payload PM state information e.g. SP is suspended, uninitialised etc.
 * and macros to access the state information in the per-cpu 'state' flags
 ******************************************************************************/
#define TSP_PSTATE_OFF		0
#define TSP_PSTATE_ON		1
#define TSP_PSTATE_SUSPEND	2
#define TSP_PSTATE_SHIFT	0
#define TSP_PSTATE_MASK		0x3
#define get_tsp_pstate(state)	((state >> TSP_PSTATE_SHIFT) & TSP_PSTATE_MASK)
#define clr_tsp_pstate(state)	(state &= ~(TSP_PSTATE_MASK \
					<< TSP_PSTATE_SHIFT))
#define set_tsp_pstate(st, pst)	do {					\
					clr_tsp_pstate(st);		\
					st |= (pst & TSP_PSTATE_MASK) << \
						TSP_PSTATE_SHIFT;	\
				} while (0)
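/*
 * Illustrative usage of the PM state accessors above (not part of this
 * interface): the TSPD's PSCI hooks are expected to update the per-cpu
 * 'state' word as the TSP is turned on, suspended or resumed. The 'tsp_ctx'
 * and 'linear_id' names below are assumptions used only for the example.
 *
 *	tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
 *
 *	assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_OFF);
 *	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
 */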

/*
 * This flag is used by the TSPD to determine if the TSP is servicing a yielding
 * SMC request prior to programming the next entry into the TSP e.g. if TSP
 * execution is preempted by a non-secure interrupt and control is handed to
 * the normal world. If another request which is distinct from what the TSP was
 * previously doing arrives, then this flag will help the TSPD to either reject
 * the new request or service it while ensuring that the previous context is
 * not corrupted.
 */
#define YIELD_SMC_ACTIVE_FLAG_SHIFT	2
#define YIELD_SMC_ACTIVE_FLAG_MASK	1
#define get_yield_smc_active_flag(state)				\
				((state >> YIELD_SMC_ACTIVE_FLAG_SHIFT)	\
				& YIELD_SMC_ACTIVE_FLAG_MASK)
#define set_yield_smc_active_flag(state)	(state |=		\
					1 << YIELD_SMC_ACTIVE_FLAG_SHIFT)
#define clr_yield_smc_active_flag(state)	(state &=		\
					~(YIELD_SMC_ACTIVE_FLAG_MASK	\
					<< YIELD_SMC_ACTIVE_FLAG_SHIFT))
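/*
 * Illustrative use of the yielding-SMC flag (not part of this interface),
 * sketched from how a dispatcher would be expected to use it; names other
 * than the macros themselves are assumptions:
 *
 *	set_yield_smc_active_flag(tsp_ctx->state);   entering a yielding SMC
 *	...
 *	if (get_yield_smc_active_flag(tsp_ctx->state)) {
 *		a previous yielding SMC was preempted: resume it or reject
 *		the new, distinct request
 *	}
 *	clr_yield_smc_active_flag(tsp_ctx->state);   request completed
 */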

/*******************************************************************************
 * Secure Payload execution state information i.e. aarch32 or aarch64
 ******************************************************************************/
#define TSP_AARCH32		MODE_RW_32
#define TSP_AARCH64		MODE_RW_64

/*******************************************************************************
 * The SPD should know the type of Secure Payload.
 ******************************************************************************/
#define TSP_TYPE_UP		PSCI_TOS_NOT_UP_MIG_CAP
#define TSP_TYPE_UPM		PSCI_TOS_UP_MIG_CAP
#define TSP_TYPE_MP		PSCI_TOS_NOT_PRESENT_MP

/*******************************************************************************
 * Secure Payload migrate type information as known to the SPD. We assume that
 * the SPD is dealing with an MP Secure Payload.
 ******************************************************************************/
#define TSP_MIGRATE_INFO	TSP_TYPE_MP

/*******************************************************************************
 * Number of cpus that are present on this platform. TODO: Rely on a topology
 * tree to determine this in the future to avoid assumptions about mpidr
 * allocation
 ******************************************************************************/
#define TSPD_CORE_COUNT		PLATFORM_CORE_COUNT

/*******************************************************************************
 * Constants that allow assembler code to preserve callee-saved registers of the
 * C runtime context while performing a security state switch.
 ******************************************************************************/
#define TSPD_C_RT_CTX_X19	0x0
#define TSPD_C_RT_CTX_X20	0x8
#define TSPD_C_RT_CTX_X21	0x10
#define TSPD_C_RT_CTX_X22	0x18
#define TSPD_C_RT_CTX_X23	0x20
#define TSPD_C_RT_CTX_X24	0x28
#define TSPD_C_RT_CTX_X25	0x30
#define TSPD_C_RT_CTX_X26	0x38
#define TSPD_C_RT_CTX_X27	0x40
#define TSPD_C_RT_CTX_X28	0x48
#define TSPD_C_RT_CTX_X29	0x50
#define TSPD_C_RT_CTX_X30	0x58
#define TSPD_C_RT_CTX_SIZE	0x60
#define TSPD_C_RT_CTX_ENTRIES	(TSPD_C_RT_CTX_SIZE >> DWORD_SHIFT)
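/*
 * Note on the layout above (informative): each offset is simply
 * (register index) * 8, i.e. x19 at 0x0 through x30 at 0x58, giving a frame of
 * TSPD_C_RT_CTX_SIZE = 0x60 bytes. Assuming DWORD_SHIFT is 3 (taken from the
 * architectural headers), TSPD_C_RT_CTX_ENTRIES works out to 0x60 >> 3 = 12
 * double words, one per callee-saved register x19-x30. The assembler
 * entry/exit paths are expected to save and restore registers at these
 * offsets, e.g. (illustrative only):
 *
 *	stp	x19, x20, [sp, #TSPD_C_RT_CTX_X19]
 *	...
 *	ldp	x29, x30, [sp, #TSPD_C_RT_CTX_X29]
 */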

/*******************************************************************************
 * Constants that allow assembler code to preserve caller-saved registers of the
 * SP context while performing a TSP preemption.
 * Note: These offsets have to match the offsets for the corresponding
 * registers in cpu_context as we are using memcpy to copy the values from
 * cpu_context to sp_ctx.
 ******************************************************************************/
#define TSPD_SP_CTX_X0		0x0
#define TSPD_SP_CTX_X1		0x8
#define TSPD_SP_CTX_X2		0x10
#define TSPD_SP_CTX_X3		0x18
#define TSPD_SP_CTX_X4		0x20
#define TSPD_SP_CTX_X5		0x28
#define TSPD_SP_CTX_X6		0x30
#define TSPD_SP_CTX_X7		0x38
#define TSPD_SP_CTX_X8		0x40
#define TSPD_SP_CTX_X9		0x48
#define TSPD_SP_CTX_X10		0x50
#define TSPD_SP_CTX_X11		0x58
#define TSPD_SP_CTX_X12		0x60
#define TSPD_SP_CTX_X13		0x68
#define TSPD_SP_CTX_X14		0x70
#define TSPD_SP_CTX_X15		0x78
#define TSPD_SP_CTX_X16		0x80
#define TSPD_SP_CTX_X17		0x88
#define TSPD_SP_CTX_SIZE	0x90
#define TSPD_SP_CTX_ENTRIES	(TSPD_SP_CTX_SIZE >> DWORD_SHIFT)

#ifndef __ASSEMBLY__

#include <cassert.h>
#include <stdint.h>

/*
 * The number of arguments to save during an SMC call for the TSP.
 * Currently only x1 and x2 are used by the TSP.
 */
#define TSP_NUM_ARGS	0x2

/* AArch64 callee saved general purpose register context structure. */
DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);

/*
 * Compile time assertion to ensure that both the compiler and linker
 * have the same double word aligned view of the size of the C runtime
 * register context.
 */
CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t),	\
	assert_spd_c_rt_regs_size_mismatch);

/* SEL1 Secure Payload (SP) caller saved register context structure. */
DEFINE_REG_STRUCT(sp_ctx_regs, TSPD_SP_CTX_ENTRIES);

/*
 * Compile time assertion to ensure that both the compiler and linker
 * have the same double word aligned view of the size of the SP caller
 * saved register context.
 */
CASSERT(TSPD_SP_CTX_SIZE == sizeof(sp_ctx_regs_t),	\
	assert_spd_sp_regs_size_mismatch);
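/*
 * Illustrative sketch of the copy that the note above refers to (not taken
 * verbatim from the dispatcher sources): when an EL3-routed NS interrupt
 * preempts the TSP, the caller-saved GP registers can be copied out of the
 * secure cpu_context into 'sp_ctx' and copied back on resumption. The
 * 'get_gpregs_ctx' accessor is assumed to come from context.h.
 *
 *	memcpy(&tsp_ctx->sp_ctx, get_gpregs_ctx(&tsp_ctx->cpu_ctx),
 *	       TSPD_SP_CTX_SIZE);
 *	...
 *	memcpy(get_gpregs_ctx(&tsp_ctx->cpu_ctx), &tsp_ctx->sp_ctx,
 *	       TSPD_SP_CTX_SIZE);
 *
 * Such a copy only works because the x0-x17 offsets above line up with the
 * layout of the GP register block inside cpu_context.
 */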

/*******************************************************************************
 * Structure which helps the SPD to maintain the per-cpu state of the SP.
 * 'saved_spsr_el3' - temporary copy to allow S-EL1 interrupt handling when
 *                    the TSP has been preempted.
 * 'saved_elr_el3'  - temporary copy to allow S-EL1 interrupt handling when
 *                    the TSP has been preempted.
 * 'state'          - collection of flags to track SP state e.g. on/off
 * 'mpidr'          - mpidr to associate a context with a cpu
 * 'c_rt_ctx'       - stack address to restore C runtime context from after
 *                    returning from a synchronous entry into the SP.
 * 'cpu_ctx'        - space to maintain SP architectural state
 * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
 *                    which will be queried by the TSP using the TSP_GET_ARGS
 *                    SMC.
 * 'sp_ctx'         - space to save the SEL1 Secure Payload (SP) caller saved
 *                    register context after it has been preempted by an EL3
 *                    routed NS interrupt and when a Secure Interrupt is taken
 *                    to the SP.
 ******************************************************************************/
typedef struct tsp_context {
	uint64_t saved_elr_el3;
	uint32_t saved_spsr_el3;
	uint32_t state;
	uint64_t mpidr;
	uint64_t c_rt_ctx;
	cpu_context_t cpu_ctx;
	uint64_t saved_tsp_args[TSP_NUM_ARGS];
#if TSP_NS_INTR_ASYNC_PREEMPT
	sp_ctx_regs_t sp_ctx;
#endif
} tsp_context_t;

/* Helper macros to store and retrieve tsp args from tsp_context */
#define store_tsp_args(tsp_ctx, x1, x2)	do {\
			tsp_ctx->saved_tsp_args[0] = x1;\
			tsp_ctx->saved_tsp_args[1] = x2;\
		} while (0)

#define get_tsp_args(tsp_ctx, x1, x2)	do {\
			x1 = tsp_ctx->saved_tsp_args[0];\
			x2 = tsp_ctx->saved_tsp_args[1];\
		} while (0)
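/*
 * Illustrative use of the helpers above (not part of this interface): when a
 * yielding arithmetic SMC arrives from the normal world, its operands can be
 * stashed so they can be handed back when the TSP later issues TSP_GET_ARGS.
 * 'x1' and 'x2' stand for the SMC argument registers.
 *
 *	store_tsp_args(tsp_ctx, x1, x2);	on receiving the request
 *	...
 *	get_tsp_args(tsp_ctx, x1, x2);		on servicing TSP_GET_ARGS
 */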

/* TSPD power management handlers */
extern const spd_pm_ops_t tspd_pm;

/*******************************************************************************
 * Forward declarations
 ******************************************************************************/
struct tsp_vectors;

/*******************************************************************************
 * Function & Data prototypes
 ******************************************************************************/
uint64_t tspd_enter_sp(uint64_t *c_rt_ctx);
void __dead2 tspd_exit_sp(uint64_t c_rt_ctx, uint64_t ret);
uint64_t tspd_synchronous_sp_entry(tsp_context_t *tsp_ctx);
void __dead2 tspd_synchronous_sp_exit(tsp_context_t *tsp_ctx, uint64_t ret);
void tspd_init_tsp_ep_state(struct entry_point_info *tsp_ep,
				uint32_t rw,
				uint64_t pc,
				tsp_context_t *tsp_ctx);
int tspd_abort_preempted_smc(tsp_context_t *tsp_ctx);

extern tsp_context_t tspd_sp_context[TSPD_CORE_COUNT];
extern struct tsp_vectors *tsp_vectors;
#endif /*__ASSEMBLY__*/

#endif /* __TSPD_PRIVATE_H__ */