/*
 * Copyright (c) 2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch_helpers.h>
#include <assert.h> /* for context_mgmt.h */
#include <bl_common.h>
#include <bl31.h>
#include <context_mgmt.h>
#include <debug.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <string.h>

#include "smcall.h"
#include "sm_err.h"

/* macro to check if Hypervisor is enabled in the HCR_EL2 register */
#define HYP_ENABLE_FLAG		0x286001

struct trusty_stack {
	uint8_t space[PLATFORM_STACK_SIZE] __aligned(16);
};

struct trusty_cpu_ctx {
	cpu_context_t	cpu_ctx;
	void		*saved_sp;
	uint32_t	saved_security_state;
	int		fiq_handler_active;
	uint64_t	fiq_handler_pc;
	uint64_t	fiq_handler_cpsr;
	uint64_t	fiq_handler_sp;
	uint64_t	fiq_pc;
	uint64_t	fiq_cpsr;
	uint64_t	fiq_sp_el1;
	gp_regs_t	fiq_gpregs;
	struct trusty_stack	secure_stack;
};

struct args {
	uint64_t	r0;
	uint64_t	r1;
	uint64_t	r2;
	uint64_t	r3;
	uint64_t	r4;
	uint64_t	r5;
	uint64_t	r6;
	uint64_t	r7;
};

struct trusty_cpu_ctx trusty_cpu_ctx[PLATFORM_CORE_COUNT];

struct args trusty_init_context_stack(void **sp, void *new_stack);
struct args trusty_context_switch_helper(void **sp, void *smc_params);

static uint32_t current_vmid;

static struct trusty_cpu_ctx *get_trusty_ctx(void)
{
	return &trusty_cpu_ctx[plat_my_core_pos()];
}

static uint32_t is_hypervisor_mode(void)
{
	uint64_t hcr = read_hcr();

	return !!(hcr & HYP_ENABLE_FLAG);
}
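/*
 * World switch into/out of Trusty.
 *
 * The call arguments are packed into a 'struct args', the EL1 system
 * registers of the current world are saved, and the assembly helper
 * trusty_context_switch_helper() swaps execution onto the stack pointer
 * saved in 'saved_sp'. Execution resumes here only when the other world
 * switches back, at which point the EL1 context of the original world is
 * restored. When a hypervisor is present, x7 of the non-secure caller
 * carries the VMID (per ARM DEN0028A) and is forwarded in r7.
 */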
static struct args trusty_context_switch(uint32_t security_state, uint64_t r0,
					 uint64_t r1, uint64_t r2, uint64_t r3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	struct trusty_cpu_ctx *ctx_smc;

	assert(ctx->saved_security_state != security_state);

	ret.r7 = 0;
	if (is_hypervisor_mode()) {
		/* According to the ARM DEN0028A spec, VMID is stored in x7 */
		ctx_smc = cm_get_context(NON_SECURE);
		assert(ctx_smc);
		ret.r7 = SMC_GET_GP(ctx_smc, CTX_GPREG_X7);
	}
	/* r4, r5, r6 reserved for future use. */
	ret.r6 = 0;
	ret.r5 = 0;
	ret.r4 = 0;
	ret.r3 = r3;
	ret.r2 = r2;
	ret.r1 = r1;
	ret.r0 = r0;

	cm_el1_sysregs_context_save(security_state);

	ctx->saved_security_state = security_state;
	ret = trusty_context_switch_helper(&ctx->saved_sp, &ret);

	assert(ctx->saved_security_state == !security_state);

	cm_el1_sysregs_context_restore(security_state);
	cm_set_next_eret_context(security_state);

	return ret;
}

static uint64_t trusty_fiq_handler(uint32_t id,
				   uint32_t flags,
				   void *handle,
				   void *cookie)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	assert(!is_caller_secure(flags));

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_ENTER, 0, 0, 0);
	if (ret.r0) {
		SMC_RET0(handle);
	}

	if (ctx->fiq_handler_active) {
		INFO("%s: fiq handler already active\n", __func__);
		SMC_RET0(handle);
	}

	ctx->fiq_handler_active = 1;
	memcpy(&ctx->fiq_gpregs, get_gpregs_ctx(handle), sizeof(ctx->fiq_gpregs));
	ctx->fiq_pc = SMC_GET_EL3(handle, CTX_ELR_EL3);
	ctx->fiq_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_sp_el1 = read_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1);

	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_handler_sp);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_handler_pc, ctx->fiq_handler_cpsr);

	SMC_RET0(handle);
}
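/*
 * FIQ support SMCs. The non-secure OS registers a per-cpu FIQ handler entry
 * point and stack via SMC_FC64_SET_FIQ_HANDLER. When an S-EL1 FIQ fires
 * while the non-secure world is running, trusty_fiq_handler() above forwards
 * SMC_FC_FIQ_ENTER to Trusty, saves the interrupted non-secure GP registers,
 * PC, CPSR and SP_EL1, and redirects the non-secure return to the registered
 * handler. The handler later issues SMC_FC_FIQ_EXIT (handled by
 * trusty_fiq_exit() below) to restore the saved state.
 */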
static uint64_t trusty_set_fiq_handler(void *handle, uint64_t cpu,
				       uint64_t handler, uint64_t stack)
{
	struct trusty_cpu_ctx *ctx;

	if (cpu >= PLATFORM_CORE_COUNT) {
		ERROR("%s: cpu %ld >= %d\n", __func__, cpu, PLATFORM_CORE_COUNT);
		return SM_ERR_INVALID_PARAMETERS;
	}

	ctx = &trusty_cpu_ctx[cpu];
	ctx->fiq_handler_pc = handler;
	ctx->fiq_handler_cpsr = SMC_GET_EL3(handle, CTX_SPSR_EL3);
	ctx->fiq_handler_sp = stack;

	SMC_RET1(handle, 0);
}

static uint64_t trusty_get_fiq_regs(void *handle)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint64_t sp_el0 = read_ctx_reg(&ctx->fiq_gpregs, CTX_GPREG_SP_EL0);

	SMC_RET4(handle, ctx->fiq_pc, ctx->fiq_cpsr, sp_el0, ctx->fiq_sp_el1);
}

static uint64_t trusty_fiq_exit(void *handle, uint64_t x1, uint64_t x2, uint64_t x3)
{
	struct args ret;
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->fiq_handler_active) {
		NOTICE("%s: fiq handler not active\n", __func__);
		SMC_RET1(handle, SM_ERR_INVALID_PARAMETERS);
	}

	ret = trusty_context_switch(NON_SECURE, SMC_FC_FIQ_EXIT, 0, 0, 0);
	if (ret.r0 != 1) {
		INFO("%s(%p) SMC_FC_FIQ_EXIT returned unexpected value, %ld\n",
		     __func__, handle, ret.r0);
	}

	/*
	 * Restore register state to state recorded on fiq entry.
	 *
	 * x0, sp_el1, pc and cpsr need to be restored because el1 cannot
	 * restore them.
	 *
	 * x1-x4 and x8-x17 need to be restored here because smc_handler64
	 * corrupts them (el1 code also restored them).
	 */
	memcpy(get_gpregs_ctx(handle), &ctx->fiq_gpregs, sizeof(ctx->fiq_gpregs));
	ctx->fiq_handler_active = 0;
	write_ctx_reg(get_sysregs_ctx(handle), CTX_SP_EL1, ctx->fiq_sp_el1);
	cm_set_elr_spsr_el3(NON_SECURE, ctx->fiq_pc, ctx->fiq_cpsr);

	SMC_RET0(handle);
}
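/*
 * Top-level SMC dispatch for the Trusty SPD. Calls originating from the
 * secure world are only expected to be SMC_SC_NS_RETURN (return to the
 * non-secure caller); anything else gets SMC_UNK. Non-secure FIQ management
 * fast calls are handled locally, and every other call is forwarded to
 * Trusty through a world switch. The current_vmid bookkeeping rejects, with
 * SM_ERR_BUSY, a call from a different guest VM while a forwarded SMC is
 * still in progress on this path.
 */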
static uint64_t trusty_smc_handler(uint32_t smc_fid,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct args ret;
	uint32_t vmid = 0;

	if (is_caller_secure(flags)) {
		if (smc_fid == SMC_SC_NS_RETURN) {
			ret = trusty_context_switch(SECURE, x1, 0, 0, 0);
			SMC_RET8(handle, ret.r0, ret.r1, ret.r2, ret.r3,
				 ret.r4, ret.r5, ret.r6, ret.r7);
		}
		INFO("%s (0x%x, 0x%lx, 0x%lx, 0x%lx, 0x%lx, %p, %p, 0x%lx) \
			cpu %d, unknown smc\n",
			__func__, smc_fid, x1, x2, x3, x4, cookie, handle, flags,
			plat_my_core_pos());
		SMC_RET1(handle, SMC_UNK);
	} else {
		switch (smc_fid) {
		case SMC_FC64_SET_FIQ_HANDLER:
			return trusty_set_fiq_handler(handle, x1, x2, x3);
		case SMC_FC64_GET_FIQ_REGS:
			return trusty_get_fiq_regs(handle);
		case SMC_FC_FIQ_EXIT:
			return trusty_fiq_exit(handle, x1, x2, x3);
		default:
			if (is_hypervisor_mode())
				vmid = SMC_GET_GP(handle, CTX_GPREG_X7);

			if ((current_vmid != 0) && (current_vmid != vmid)) {
				/*
				 * Printing this message at a higher level
				 * would disturb the SMC mechanism in a
				 * multi-guest environment, so keep it at
				 * VERBOSE; change it to WARN if you need it.
				 */
				VERBOSE("Previous SMC not finished.\n");
				SMC_RET1(handle, SM_ERR_BUSY);
			}
			current_vmid = vmid;
			ret = trusty_context_switch(NON_SECURE, smc_fid, x1,
						    x2, x3);
			current_vmid = 0;
			SMC_RET1(handle, ret.r0);
		}
	}
}
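/*
 * BL32 initialisation: saves the non-secure EL1 context, builds the secure
 * context from the Trusty entry point information, sets up the per-cpu
 * secure stack and performs the first switch into Trusty. For 32 bit images,
 * secondary cpus enter 32 bytes past the image start, i.e. just after the
 * exception vectors (the (1U << 5) adjustment below).
 */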
static int32_t trusty_init(void)
{
	void el3_exit(void);
	entry_point_info_t *ep_info;
	struct args zero_args = {0};
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();
	uint32_t cpu = plat_my_core_pos();
	int reg_width = GET_RW(read_ctx_reg(get_el3state_ctx(&ctx->cpu_ctx),
			       CTX_SPSR_EL3));

	/*
	 * Get information about the Trusty image. Its absence is a critical
	 * failure.
	 */
	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	assert(ep_info);

	cm_el1_sysregs_context_save(NON_SECURE);

	cm_set_context(&ctx->cpu_ctx, SECURE);
	cm_init_my_context(ep_info);

	/*
	 * Adjust secondary cpu entry point for 32 bit images to the
	 * end of exception vectors
	 */
	if ((cpu != 0) && (reg_width == MODE_RW_32)) {
		INFO("trusty: cpu %d, adjust entry point to 0x%lx\n",
		     cpu, ep_info->pc + (1U << 5));
		cm_set_elr_el3(SECURE, ep_info->pc + (1U << 5));
	}

	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	ctx->saved_security_state = ~0; /* initial saved state is invalid */
	trusty_init_context_stack(&ctx->saved_sp, &ctx->secure_stack);

	trusty_context_switch_helper(&ctx->saved_sp, &zero_args);

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return 0;
}

static void trusty_cpu_suspend(void)
{
	struct args ret;

	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_SUSPEND, 0, 0, 0);
	if (ret.r0 != 0) {
		INFO("%s: cpu %d, SMC_FC_CPU_SUSPEND returned unexpected value, %ld\n",
		     __func__, plat_my_core_pos(), ret.r0);
	}
}

static void trusty_cpu_resume(void)
{
	struct args ret;

	ret = trusty_context_switch(NON_SECURE, SMC_FC_CPU_RESUME, 0, 0, 0);
	if (ret.r0 != 0) {
		INFO("%s: cpu %d, SMC_FC_CPU_RESUME returned unexpected value, %ld\n",
		     __func__, plat_my_core_pos(), ret.r0);
	}
}
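/*
 * PSCI power management hooks. Off and suspend events are forwarded to
 * Trusty as SMC_FC_CPU_SUSPEND, resume events as SMC_FC_CPU_RESUME. A cpu
 * coming online for the first time (no saved secure stack pointer yet) runs
 * trusty_init() instead of resuming.
 */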
static int32_t trusty_cpu_off_handler(uint64_t unused)
{
	trusty_cpu_suspend();

	return 0;
}

static void trusty_cpu_on_finish_handler(uint64_t unused)
{
	struct trusty_cpu_ctx *ctx = get_trusty_ctx();

	if (!ctx->saved_sp) {
		trusty_init();
	} else {
		trusty_cpu_resume();
	}
}

static void trusty_cpu_suspend_handler(uint64_t unused)
{
	trusty_cpu_suspend();
}

static void trusty_cpu_suspend_finish_handler(uint64_t unused)
{
	trusty_cpu_resume();
}

static const spd_pm_ops_t trusty_pm = {
	.svc_off = trusty_cpu_off_handler,
	.svc_suspend = trusty_cpu_suspend_handler,
	.svc_on_finish = trusty_cpu_on_finish_handler,
	.svc_suspend_finish = trusty_cpu_suspend_finish_handler,
};
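/*
 * SPD setup: probes the first instruction of the Trusty image to guess
 * whether it is a 32 bit or 64 bit image (the 0xea opcode prefix is an ARM
 * unconditional branch; the 0xd53810.. pattern is taken here as the marker
 * of an AArch64 entry), programs the SPSR accordingly, and registers the
 * BL32 init hook, the PSCI PM hooks and the S-EL1 interrupt handler routed
 * from the non-secure world.
 */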
static int32_t trusty_setup(void)
{
	entry_point_info_t *ep_info;
	uint32_t instr;
	uint32_t flags;
	int ret;
	int aarch32 = 0;

	ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (!ep_info) {
		INFO("Trusty image missing.\n");
		return -1;
	}

	instr = *(uint32_t *)ep_info->pc;

	if (instr >> 24 == 0xea) {
		INFO("trusty: Found 32 bit image\n");
		aarch32 = 1;
	} else if (instr >> 8 == 0xd53810) {
		INFO("trusty: Found 64 bit image\n");
	} else {
		INFO("trusty: Found unknown image, 0x%x\n", instr);
	}

	SET_PARAM_HEAD(ep_info, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
	if (!aarch32)
		ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);
	else
		ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
					    SPSR_E_LITTLE,
					    DAIF_FIQ_BIT |
					    DAIF_IRQ_BIT |
					    DAIF_ABT_BIT);

	bl31_register_bl32_init(trusty_init);

	psci_register_spd_pm_hook(&trusty_pm);

	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      trusty_fiq_handler,
					      flags);
	if (ret)
		ERROR("trusty: failed to register fiq handler, ret = %d\n", ret);

	return 0;
}

/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
	trusty_fast,

	OEN_TOS_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_FAST,
	trusty_setup,
	trusty_smc_handler
);

/* Define a SPD runtime service descriptor for standard SMC calls */
DECLARE_RT_SVC(
	trusty_std,

	OEN_TAP_START,
	SMC_ENTITY_SECURE_MONITOR,
	SMC_TYPE_STD,
	NULL,
	trusty_smc_handler
);